| function_name (string, 1–63 chars) | docstring (string, 50–5.89k chars) | masked_code (string, 50–882k chars) | implementation (string, 169–12.9k chars) | start_line (int32, 1–14.6k) | end_line (int32, 16–14.6k) | file_content (string, 274–882k chars) |
|---|---|---|---|---|---|---|
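Each row below pairs a masked Python source file with the ground-truth body of the function that was removed. For orientation, here is a minimal sketch of how a row's implementation can be spliced back into its masked_code to recover file_content. It assumes the row is a plain dict keyed by the column names above and that the `# MASKED: <function_name> ...` marker occupies a single line, as it does in the rows that follow; the helper name is hypothetical.

from typing import Any, Dict

def reconstruct_file(row: Dict[str, Any]) -> str:
    """Replace the row's single-line '# MASKED:' marker with its implementation."""
    masked_lines = row["masked_code"].splitlines()
    marker = f"# MASKED: {row['function_name']} function"
    # Locate the one-line placeholder left where the function body was removed
    idx = next(
        i for i, line in enumerate(masked_lines) if line.lstrip().startswith(marker)
    )
    impl_lines = row["implementation"].splitlines()
    return "\n".join(masked_lines[:idx] + impl_lines + masked_lines[idx + 1 :])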
show_info
|
Show an info message to the user and log it
Args:
msg (str): User message to print on the console
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
# MASKED: show_info function (lines 201-210)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables above to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for device "%s" was not found, or its protocol is not set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so a subcommand can run while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including
# the environment variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# When started from a background thread (e.g. for a plugin), don't
# display these messages as the plugin should do its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
| 201
| 210
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables above to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for device "%s" was not found, or its protocol is not set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so a subcommand can run while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including
# the environment variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# When started from a background thread (e.g. for a plugin), don't
# display these messages as the plugin should do its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
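Note: the row above masks ProxyContext.show_info. All of the ProxyContext.show_* helpers in this file follow the same pattern: echo to the console only when verbose is off (in verbose mode the console log handler installed by configure_logger already mirrors every record), and always emit a log record. A minimal, self-contained sketch of that gating, using a hypothetical MiniProxy stand-in rather than a real ProxyContext (which needs a click.Context):

import logging
import click

class MiniProxy:
    """Hypothetical stand-in that mirrors ProxyContext's show_* gating."""

    def __init__(self, verbose: bool = False):
        self.verbose = verbose

    def show_info(self, msg: str, *args, **kwargs):
        # Echo directly only when not verbose; in verbose mode the console
        # log handler is expected to print the record instead
        if not self.verbose:
            click.secho(msg)
        logging.warning(msg, *args, **kwargs)  # same level as the row's implementation

MiniProxy(verbose=False).show_info("echoed to the console and logged")
MiniProxy(verbose=True).show_info("logged only")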
show_warning
|
Show a warning to the user and log it
Args:
msg (str): User message to print on the console
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
# MASKED: show_warning function (lines 212-221)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments by exposing the variables above to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for device "%s" was not found, or its protocol is not set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so a subcommand can run while it is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set the ready signal once the whole environment, including
# the environment variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# When started from a background thread (e.g. for a plugin), don't
# display these messages as the plugin should do its own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
| 212
| 221
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): Dictionary of proxy settings to apply
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
        logging.info(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
        # Support WSL environments by declaring which variables should be exposed to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
        except CumulocityMissingTFAToken:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found, or it does not have protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
        ready_signal (threading.Event, optional): Event that is set once the
            proxy is ready to accept connections and the environment is set up
    """
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so that a subcommand can run while the proxy is active
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # injected env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
    tcp_server = None
    background = None
    exit_code = ExitCodes.OK  # default; may be overwritten on error
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for reference to later
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
        # When not running in the main thread (i.e. started by a plugin),
        # skip the banner messages as plugins handle their own output
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
regress
|
Runs a linear regression to decompose the dependent variable into the explanatory variables
returns an object of type statsmodels RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios files
Variant is a tuple of (weighting, size) where:
weighting is one of "ew", "vw"
number of inds is 30 or 49
"""
if filetype is "returns":
name = f"{weighting}_rets"
divisor = 100
elif filetype is "nfirms":
name = "nfirms"
divisor = 1
elif filetype is "size":
name = "size"
divisor = 1
else:
raise ValueError(f"filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
"""
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
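# Example (sketch, hypothetical returns): compound() is equivalent to the
# naive (1+r).prod()-1, but works in log space which is numerically safer.
def _example_compound():
    r = pd.Series([0.10, -0.05, 0.03])
    assert np.isclose(compound(r), (1 + r).prod() - 1)
    return compound(r)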
def annualize_rets(r):
"""
Annualizes a set of returns
    The time span is inferred from the first and last valid index dates
"""
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # Series input: the difference is a scalar Timedelta without .dt
        years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
    The time span is inferred from the first and last valid index dates
"""
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # Series input: the difference is a scalar Timedelta without .dt
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # Series input: the difference is a scalar Timedelta without .dt
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
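# Example (sketch, simulated returns): plain Gaussian VaR next to the
# Cornish-Fisher adjusted version, both at the default 5% level.
def _example_var_gaussian():
    r = pd.Series(np.random.normal(0.01, 0.05, 120))
    return var_gaussian(r), var_gaussian(r, modified=True)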
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
weights are a numpy array or N x 1 maxtrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
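# Example (sketch, hypothetical 2-asset inputs): the weights that achieve a
# 9% target return with the lowest volatility.
def _example_minimize_vol():
    er = pd.Series([0.08, 0.12])                        # expected returns
    cov = pd.DataFrame([[0.04, 0.006], [0.006, 0.09]])  # covariance matrix
    return minimize_vol(0.09, er, cov)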
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
        # add GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
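# Example (sketch): CPPI with a 3x multiplier and an 80% floor on the
# cap-weighted total market index; assumes the Ken French CSVs under data/.
def _example_run_cppi():
    risky_r = get_total_market_index_returns()["1990":]
    btr = run_cppi(risky_r, m=3, start=1000, floor=0.8)
    return btr["Wealth"].tail()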
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
    :return: a DataFrame of n_scenarios columns and n_years*steps_per_year+1 rows (or a numpy array of 1+returns if prices=False)
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
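# Example (sketch): 50 simulated five-year price paths at monthly steps;
# the result has n_years*steps_per_year + 1 rows (61) and 50 columns.
def _example_gbm():
    prices = gbm(n_years=5, n_scenarios=50, mu=0.07, sigma=0.15)
    return prices.shape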
import statsmodels.api as sm
# MASKED: regress function (lines 569-583)
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
    Returns the optimal weights that minimize the Tracking error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
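# Example (sketch): style weights of a hedge fund index explained by
# industry returns under the long-only, fully-invested constraint; assumes
# the EDHEC and Ken French CSVs under data/ and the EDHEC column name below.
def _example_style_analysis():
    ind = get_ind_returns()["2000":]
    hfi = get_hfi_returns()["2000":]
    return style_analysis(hfi["Convertible Arbitrage"], ind)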
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
If supplied a set of capweights and a capweight tether, it is applied and reweighted
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1)  # min_count=1 yields NA when all inputs are NA
return returns
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
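# Example (sketch, hypothetical 2-asset case): a single absolute view that
# asset "B" will return 3%, starting from equal prior weights.
def _example_bl():
    tickers = ["A", "B"]
    sigma_prior = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]],
                               index=tickers, columns=tickers)
    w_prior = pd.Series(0.5, index=tickers)
    p = pd.DataFrame([[0.0, 1.0]], columns=tickers)  # the view picks asset B
    q = pd.Series([0.03])                            # B is expected to return 3%
    mu_bl, _sigma_bl = bl(w_prior, sigma_prior, p, q)
    return mu_bl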
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this assumes all weights are positive
return w
|
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
    returns an object of type statsmodels RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
| 569
| 583
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios files
Variant is a tuple of (weighting, size) where:
weighting is one of "ew", "vw"
number of inds is 30 or 49
"""
if filetype is "returns":
name = f"{weighting}_rets"
divisor = 100
elif filetype is "nfirms":
name = "nfirms"
divisor = 1
elif filetype is "size":
name = "size"
divisor = 1
else:
raise ValueError(f"filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
"""
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
Annualizes a set of returns
    The time span is inferred from the first and last valid index dates
"""
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # Series input: the difference is a scalar Timedelta without .dt
        years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
    The time span is inferred from the first and last valid index dates
"""
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # Series input: the difference is a scalar Timedelta without .dt
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # Series input: the difference is a scalar Timedelta without .dt
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
weights are a numpy array or N x 1 maxtrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
        # add GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
    :return: a DataFrame of n_scenarios columns and n_years*steps_per_year+1 rows (or a numpy array of 1+returns if prices=False)
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
    returns an object of type statsmodels RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
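# Example (sketch): factor loadings of the SmallCap decile returns on the
# Fama-French factors; assumes the CSVs under data/ used by the loaders above.
def _example_regress():
    rets = get_ffme_returns()["1990":"2015"]
    fff = get_fff_returns()["1990":"2015"]
    return regress(rets["SmallCap"], fff).params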
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
    Returns the optimal weights that minimize the Tracking error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If cap weights and tether parameters are supplied, microcaps are excluded and
    weights are capped at a multiple of the cap weight, then renormalized
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1)  # min_count=1 yields NA when every input in the period is NA
return returns
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
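# A minimal sketch: with delta=0.5 the shrinkage estimate is the midpoint of the
# sample and constant-correlation estimates; the data below is random, for illustration.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    rets = pd.DataFrame(np.random.default_rng(2).normal(0.01, 0.04, size=(120, 4)),
                        columns=list("WXYZ"))
    s, c, k = sample_cov(rets), cc_cov(rets), shrinkage_cov(rets, delta=0.5)
    assert np.allclose(k.values, 0.5*c.values + 0.5*s.values)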
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
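# A minimal sketch: on a toy 2-asset covariance matrix, ERC loads away from the
# high-variance asset until both risk contributions are equal. Numbers are illustrative.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    toy_cov = pd.DataFrame([[0.04, 0.006], [0.006, 0.01]], index=["A", "B"], columns=["A", "B"])
    w_erc = equal_risk_contributions(toy_cov)
    print(np.round(w_erc, 3), np.round(risk_contribution(w_erc, toy_cov), 3))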
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
    # Q must be a K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
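# A minimal sketch of bl() with one relative view, loosely following the
# He-Litterman two-asset setup; every number here is an illustrative assumption.
if __name__ == "__main__":
    import pandas as pd
    assets = ["INTC", "PFE"]
    sigma_pr = pd.DataFrame([[0.0462, 0.0177], [0.0177, 0.0154]], index=assets, columns=assets)
    w_pr = pd.Series([0.44, 0.56], index=assets)
    p_view = pd.DataFrame([[1.0, -1.0]], columns=assets)  # view: INTC outperforms PFE
    q_view = pd.Series([0.02])                            # ... by 2%
    mu_bl, sigma_bl = bl(w_pr, sigma_pr, p_view, q_view, tau=0.05)
    print(mu_bl.round(4))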
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this normalization assumes all weights are positive
return w
|
backtest_ws
|
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
    Load and format the Ken French Industry Portfolios files
    filetype is one of "returns", "nfirms", "size"
    weighting is one of "ew", "vw" (used for the returns files)
    n_inds is 30 or 49
    """
    if filetype == "returns":
        name = f"{weighting}_rets"
        divisor = 100
    elif filetype == "nfirms":
        name = "nfirms"
        divisor = 1
    elif filetype == "size":
        name = "size"
        divisor = 1
    else:
        raise ValueError("filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
"""
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
Annualizes a set of returns
    Infers the periods per year from the span between the first and last valid dates
"""
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # a scalar Timedelta (Series input) has .days but no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
    Infers the periods per year from the span between the first and last valid dates
"""
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # a scalar Timedelta (Series input) has .days but no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # a scalar Timedelta (Series input) has .days but no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
    Returns True if the hypothesis of normality cannot be rejected at that level, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
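# A minimal sketch: compare Gaussian, Cornish-Fisher and historic VaR on
# fat-tailed data; the t-distributed draws are an illustrative assumption.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    fat = pd.Series(np.random.default_rng(5).standard_t(df=4, size=1000) * 0.01)
    print(var_gaussian(fat), var_gaussian(fat, modified=True), var_historic(fat))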
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
    weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
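# A minimal sketch: with equal vols and zero correlation, the max-Sharpe weights
# tilt toward the asset with the higher expected return. Inputs are illustrative.
if __name__ == "__main__":
    import numpy as np
    print(msr(0.02, np.array([0.10, 0.05]), np.array([[0.04, 0.0], [0.0, 0.04]])).round(3))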
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
        # add GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
    backtest_result = {
        "Wealth": account_history,
        "Risky Wealth": risky_wealth,
        "Risk Budget": cushion_history,
        "Risky Allocation": risky_w_history,
        "m": m,
        "start": start,
        "floor": floor,
        "risky_r": risky_r,
        "safe_r": safe_r,
        "drawdown": drawdown,
        "peak": peak_history,
        "floorval": floorval_history  # renamed: a second "floor" key here silently overwrote the floor parameter above
}
return backtest_result
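# A minimal sketch: CPPI on simulated monthly risky returns; start/floor/m are
# illustrative. With no gap between periods, Wealth should stay above the floor.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    sim = pd.DataFrame(np.random.default_rng(3).normal(0.01, 0.04, size=(120, 5)))
    btr = run_cppi(sim, m=3, start=1000, floor=0.8)
    print(btr["Wealth"].iloc[-1].round(2))  # terminal wealth per scenario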
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
    :param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
    :param mu: Annualized Drift, e.g. Market Return
    :param sigma: Annualized Volatility
    :param steps_per_year: granularity of the simulation
    :param s_0: initial value
    :param prices: if True return price paths, otherwise the per-period returns
    :return: n_years*steps_per_year+1 rows of n_scenarios columns (a DataFrame of prices, or an array of returns)
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
    returns an object of type statsmodels RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
    Returns the optimal weights that minimize the Tracking Error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If cap weights and tether parameters are supplied, microcaps are excluded and
    weights are capped at a multiple of the cap weight, then renormalized
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
# MASKED: backtest_ws function (lines 657-671)
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
    # Q must be a K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this normalization assumes all weights are positive
return w
|
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1)  # min_count=1 yields NA when every input in the period is NA
return returns
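# A minimal usage sketch for the unmasked function above, assuming the rest of the
# module is in scope (the signature's default weighting is weight_ew); the inline
# scheme below keeps the example self-contained apart from that default.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    rets = pd.DataFrame(np.random.default_rng(4).normal(0.008, 0.03, size=(120, 10)))
    ew = lambda r, **kwargs: pd.Series(1/r.shape[1], index=r.columns)  # trivial equal-weight scheme
    print(backtest_ws(rets, estimation_window=36, weighting=ew).dropna().head())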
| 657
| 671
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
    Load and format the Ken French Industry Portfolios files
    filetype is one of "returns", "nfirms", "size"
    weighting is one of "ew", "vw" (used for the returns files)
    n_inds is 30 or 49
    """
    if filetype == "returns":
        name = f"{weighting}_rets"
        divisor = 100
    elif filetype == "nfirms":
        name = "nfirms"
        divisor = 1
    elif filetype == "size":
        name = "size"
        divisor = 1
    else:
        raise ValueError("filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
"""
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
Annualizes a set of returns
    Infers the periods per year from the span between the first and last valid dates
"""
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # a scalar Timedelta (Series input) has .days but no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
    Infers the periods per year from the span between the first and last valid dates
"""
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # a scalar Timedelta (Series input) has .days but no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:  # a scalar Timedelta (Series input) has .days but no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
    Returns True if the hypothesis of normality cannot be rejected at that level, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
    weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
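# A minimal sketch: GMV ignores expected returns; on a diagonal covariance matrix
# it weights assets by inverse variance (here ~0.2/0.8). Inputs are illustrative.
if __name__ == "__main__":
    import numpy as np
    print(gmv(np.array([[0.04, 0.0], [0.0, 0.01]])).round(3))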
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
        # add GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
    backtest_result = {
        "Wealth": account_history,
        "Risky Wealth": risky_wealth,
        "Risk Budget": cushion_history,
        "Risky Allocation": risky_w_history,
        "m": m,
        "start": start,
        "floor": floor,
        "risky_r": risky_r,
        "safe_r": safe_r,
        "drawdown": drawdown,
        "peak": peak_history,
        "floorval": floorval_history  # renamed: a second "floor" key here silently overwrote the floor parameter above
}
return backtest_result
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
    :param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
    :param mu: Annualized Drift, e.g. Market Return
    :param sigma: Annualized Volatility
    :param steps_per_year: granularity of the simulation
    :param s_0: initial value
    :param prices: if True return price paths, otherwise the per-period returns
    :return: n_years*steps_per_year+1 rows of n_scenarios columns (a DataFrame of prices, or an array of returns)
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
    returns an object of type statsmodels RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
    Returns the optimal weights that minimize the Tracking Error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If cap weights and tether parameters are supplied, microcaps are excluded and
    weights are capped at a multiple of the cap weight, then renormalized
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1)  # min_count=1 yields NA when every input in the period is NA
return returns
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
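# A minimal sketch: in a 60/40 equity/bond mix the high-vol asset dominates total
# risk despite the moderate weight. The covariance numbers are illustrative.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    cov_eqfi = pd.DataFrame([[0.04, 0.004], [0.004, 0.01]], index=["EQ", "FI"], columns=["EQ", "FI"])
    w_mix = pd.Series([0.6, 0.4], index=["EQ", "FI"])
    print(risk_contribution(w_mix, cov_eqfi).round(3))  # ~0.857 / 0.143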
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this normalization assumes all weights are positive
return w
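# --- Illustrative sketch (not part of the original kit): a tiny smoke test for
# weight_msr with two hypothetical assets; the covariance and excess-return
# numbers are invented purely for demonstration.
def _demo_weight_msr():
    assets = ["A", "B"]
    sigma = pd.DataFrame([[0.04, 0.006], [0.006, 0.09]], index=assets, columns=assets)
    mu = pd.Series([0.05, 0.08], index=assets)  # excess expected returns
    return weight_msr(sigma, mu)  # scaled so the weights sum to 1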
|
bl
|
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios files
    filetype is one of "returns", "nfirms" or "size"
    weighting (used for returns) is one of "ew", "vw"
    n_inds is 30 or 49
    """
    if filetype == "returns":
        name = f"{weighting}_rets"
        divisor = 100
    elif filetype == "nfirms":
        name = "nfirms"
        divisor = 1
    elif filetype == "size":
        name = "size"
        divisor = 1
    else:
        raise ValueError("filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
    Load and format the Ken French Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
    Load and format the Ken French Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
    Load the industry portfolio data and derive the returns of a capweighted total market index
    """
    ind_capweight = get_ind_market_caps(n_inds=n_inds, weights=True)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
Annualizes a set of returns
    inferring the elapsed time from the first and last
    valid observations in the datetime index
    """
    r_valid = r[(r!=0) & (r.notnull())]
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError: # a scalar Timedelta has no .dt accessor
        years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
    inferring the periods per year from the number of
    observations and the span of the datetime index
    """
    r_valid = r[(r!=0) & (r.notnull())]
    total_num_periods = r_valid.count()
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError: # a scalar Timedelta has no .dt accessor
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError: # a scalar Timedelta has no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
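# --- Illustrative sketch (not part of the original kit): combine the three
# annualized measures above into one table; "rets" is assumed to be a DataFrame
# of periodic returns with a DatetimeIndex (the date arithmetic above needs one).
def _demo_annualized_measures(rets, riskfree_rate=0.03):
    return pd.DataFrame({
        "Annualized Return": annualize_rets(rets),
        "Annualized Vol": annualize_vol(rets),
        "Sharpe Ratio": sharpe_ratio(rets, riskfree_rate),
    })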
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
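# --- Illustrative sketch (not part of the original kit): comparing the plain
# Gaussian VaR, its Cornish-Fisher variant and the historic VaR side by side;
# "r" is assumed to be a DataFrame of returns (one column per asset).
def _demo_var_comparison(r, level=5):
    return pd.DataFrame({
        "Gaussian": var_gaussian(r, level=level),
        "Cornish-Fisher": var_gaussian(r, level=level, modified=True),
        "Historic": var_historic(r, level=level),
    })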
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
    weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
        raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
        # add GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
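# --- Illustrative sketch (not part of the original kit): draw the frontier with
# the CML, EW and GMV markers for the industry data; expected returns are
# annualized with simple arithmetic since the data is monthly.
def _demo_plot_frontier():
    ind_rets = get_ind_returns(weighting="vw", n_inds=30)["2000":]
    er = (1 + ind_rets).prod()**(12/ind_rets.shape[0]) - 1
    cov = ind_rets.cov()
    return plot_ef(25, er, cov, show_cml=True, riskfree_rate=0.03, show_ew=True, show_gmv=True)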
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
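# --- Illustrative sketch (not part of the original kit): run the CPPI backtest
# on simulated risky returns and read off the wealth history; gbm() is defined
# further down in this module, and the parameters are arbitrary.
def _demo_cppi():
    risky_r = pd.DataFrame(gbm(n_years=5, n_scenarios=3, mu=0.07, sigma=0.15, prices=False))
    btr = run_cppi(risky_r, m=3, start=1000, floor=0.8)
    return btr["Wealth"].tail()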
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
    :return: n_scenarios columns and n_years*steps_per_year+1 rows of prices (DataFrame) or returns (numpy array)
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
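# --- Illustrative sketch (not part of the original kit): generate GBM price
# paths and summarize the distribution of terminal wealth; all parameters are
# illustrative defaults.
def _demo_gbm_terminal_values():
    prices = gbm(n_years=10, n_scenarios=500, mu=0.07, sigma=0.15, s_0=100.0)
    return prices.iloc[-1].describe()  # terminal values across scenarios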
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
returns an object of type statsmodel's RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
Returns the optimal weights that minimizes the Tracking error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
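# --- Illustrative sketch (not part of the original kit): Sharpe-style analysis
# of a single manager return series against a set of building-block indices;
# "manager_r" (Series) and "building_blocks" (DataFrame) are assumed inputs
# sharing the same index.
def _demo_style_analysis(manager_r, building_blocks):
    weights = style_analysis(manager_r, building_blocks)
    return weights.sort_values(ascending=False)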
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If a set of cap-weights and a cap-weight tether are supplied, they are applied and the weights are renormalized
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1) # min_count=1 yields NA when all inputs are NA
return returns
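# --- Illustrative sketch (not part of the original kit): backtest the
# equal-weight and cap-weight schemes side by side on the industry data
# loaded by the helpers above.
def _demo_backtest_schemes():
    ind_rets = get_ind_returns(weighting="vw", n_inds=30)["1974":]
    ind_caps = get_ind_market_caps(n_inds=30, weights=True)["1974":]
    ewr = backtest_ws(ind_rets, estimation_window=36, weighting=weight_ew)
    cwr = backtest_ws(ind_rets, estimation_window=36, weighting=weight_cw, cap_weights=ind_caps)
    return pd.DataFrame({"EW": ewr, "CW": cwr})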
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
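# --- Illustrative sketch (not part of the original kit): compare GMV weights
# under the sample-covariance and shrinkage estimators; "rets" is assumed to be
# a DataFrame of asset returns.
def _demo_gmv_estimators(rets, delta=0.5):
    return pd.DataFrame({
        "Sample": weight_gmv(rets, cov_estimator=sample_cov),
        "Shrinkage": weight_gmv(rets, cov_estimator=shrinkage_cov, delta=delta),
    }, index=rets.columns)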
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
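# --- Illustrative sketch (not part of the original kit): verify that the ERC
# weights really do equalize risk contributions, using the sample covariance of
# an assumed returns DataFrame "rets".
def _demo_erc(rets):
    cov = sample_cov(rets)
    w = equal_risk_contributions(cov)
    return risk_contribution(w, cov)  # entries should be approximately equal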
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
# MASKED: bl function (lines 787-823)
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this normalization assumes all weights are positive
return w
|
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
| 787
| 823
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios files
    filetype is one of "returns", "nfirms" or "size"
    weighting (used for returns) is one of "ew", "vw"
    n_inds is 30 or 49
    """
    if filetype == "returns":
        name = f"{weighting}_rets"
        divisor = 100
    elif filetype == "nfirms":
        name = "nfirms"
        divisor = 1
    elif filetype == "size":
        name = "size"
        divisor = 1
    else:
        raise ValueError("filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
    Load and format the Ken French Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
    Load and format the Ken French Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
#else
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
    Load the industry portfolio data and derive the returns of a capweighted total market index
    """
    ind_capweight = get_ind_market_caps(n_inds=n_inds, weights=True)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
Annualizes a set of returns
    inferring the elapsed time from the first and last
    valid observations in the datetime index
    """
    r_valid = r[(r!=0) & (r.notnull())]
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError: # a scalar Timedelta has no .dt accessor
        years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
    inferring the periods per year from the number of
    observations and the span of the datetime index
    """
    r_valid = r[(r!=0) & (r.notnull())]
    total_num_periods = r_valid.count()
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError: # a scalar Timedelta has no .dt accessor
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError: # a scalar Timedelta has no .dt accessor
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
    weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
        raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
        # add GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
    :return: n_scenarios columns and n_years*steps_per_year+1 rows of prices (DataFrame) or returns (numpy array)
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
returns an object of type statsmodel's RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
Returns the optimal weights that minimizes the Tracking error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If a set of cap-weights and a cap-weight tether are supplied, they are applied and the weights are renormalized
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1) # min_count=1 yields NA when all inputs are NA
return returns
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
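# --- Illustrative sketch (not part of the original kit): a two-asset
# Black-Litterman run with a single relative view; every number below is
# invented purely for demonstration.
def _demo_bl():
    assets = ["A", "B"]
    sigma_prior = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]], index=assets, columns=assets)
    w_prior = pd.Series([0.6, 0.4], index=assets)    # market-cap prior weights
    q = pd.Series([0.02])                            # view: A outperforms B by 2%
    p = pd.DataFrame([[1.0, -1.0]], columns=assets)  # pick matrix encoding the view
    mu_bl, sigma_bl = bl(w_prior, sigma_prior, p, q)
    return mu_bl  # posterior expected returns blending the prior with the view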
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this normalization assumes all weights are positive
return w
|
process_sources
|
Function that checks the news results and turns them into objects
Args:
sources_list: A list of dictionaries that contain sources details
|
import urllib.request,json
from .models import Sources, Articles
from datetime import datetime
#Getting api key
api_key = None
#Getting the news base url
# NEWS_API_KEY = None
# NEWS_API_BASE_URL = None
ARTICLE = None
def configure_request(app):
global api_key,NEWS_API_BASE_URL,NEWS_API_KEY,ARTICLE
api_key = app.config['NEWS_API_KEY']
ARTICLE = app.config['ARTICLE']
NEWS_API_BASE_URL = app.config['NEWS_API_BASE_URL']
NEWS_API_KEY = app.config['NEWS_API_KEY']
def get_source(category):
'''
function that gets the json response to our url request
'''
get_source_url = NEWS_API_BASE_URL.format(category,api_key)
print(get_source_url)
with urllib.request.urlopen(get_source_url) as url:
get_source_data = url.read()
get_source_response = json.loads(get_source_data)
sources_result = None
if get_source_response['sources']:
sources_results_list = get_source_response['sources']
sources_result = process_sources(sources_results_list)
print(sources_result)
return sources_result
# MASKED: process_sources function (lines 40-60)
def get_articles(id):
'''
Function that processes the articles and returns a list of articles objects
'''
get_articles_url = ARTICLE.format(id,api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
article_data = url.read()
articles_response = json.loads(article_data)
articles_object = None
if articles_response['articles']:
response_list= articles_response['articles']
articles_object = process_articles(response_list)
return articles_object
def process_articles(articles_list):
'''
function that checks the articles and processes them into instances
'''
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(author,title,description,url,image,date)
articles_object.append(articles_result)
return articles_object
|
def process_sources(sources_list):
'''
Function that checks the news results and turns them into objects
Args:
sources_list: A list of dictionaries that contain sources details
'''
sources_result = []
for source_item in sources_list:
author = source_item.get('author')
title = source_item.get('title')
imageurl = source_item.get('urltoimage')
description = source_item.get('description')
url = source_item.get('url')
id = source_item.get('id')
sources_object = Sources(author, title,imageurl,description,url,id)
sources_result.append(sources_object)
return sources_result
| 40
| 60
|
import urllib.request,json
from .models import Sources, Articles
from datetime import datetime
#Getting api key
api_key = None
#Getting the news base url
# NEWS_API_KEY = None
# NEWS_API_BASE_URL = None
ARTICLE = None
def configure_request(app):
global api_key,NEWS_API_BASE_URL,NEWS_API_KEY,ARTICLE
api_key = app.config['NEWS_API_KEY']
ARTICLE = app.config['ARTICLE']
NEWS_API_BASE_URL = app.config['NEWS_API_BASE_URL']
NEWS_API_KEY = app.config['NEWS_API_KEY']
def get_source(category):
'''
function that gets the json response to our url request
'''
get_source_url = NEWS_API_BASE_URL.format(category,api_key)
print(get_source_url)
with urllib.request.urlopen(get_source_url) as url:
get_source_data = url.read()
get_source_response = json.loads(get_source_data)
sources_result = None
if get_source_response['sources']:
sources_results_list = get_source_response['sources']
sources_result = process_sources(sources_results_list)
print(sources_result)
return sources_result
def process_sources(sources_list):
'''
Function that checks the news results and turns them into objects
Args:
sources_list: A list of dictionaries that contain sources details
'''
sources_result = []
for source_item in sources_list:
author = source_item.get('author')
title = source_item.get('title')
imageurl = source_item.get('urltoimage')
description = source_item.get('description')
url = source_item.get('url')
id = source_item.get('id')
sources_object = Sources(author, title,imageurl,description,url,id)
sources_result.append(sources_object)
return sources_result
def get_articles(id):
'''
Function that processes the articles and returns a list of articles objects
'''
get_articles_url = ARTICLE.format(id,api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
article_data = url.read()
articles_response = json.loads(article_data)
articles_object = None
if articles_response['articles']:
response_list= articles_response['articles']
articles_object = process_articles(response_list)
return articles_object
def process_articles(articles_list):
'''
function that checks the articles and processes them into instances
'''
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(author,title,description,url,image,date)
articles_object.append(articles_result)
return articles_object
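# --- Usage sketch (illustrative, not part of the original module) ---
# Shows the shape of data process_sources expects: the 'sources' list from
# the news API response. The sample dict below is made up; real responses
# carry more fields. Relies only on the Sources constructor signature used
# above.
def _demo_process_sources():
    sample_sources = [{
        'author': 'BBC',
        'title': 'BBC News',
        'urltoimage': 'https://example.com/logo.png',
        'description': 'UK and world news',
        'url': 'https://www.bbc.co.uk/news',
        'id': 'bbc-news',
    }]
    return process_sources(sample_sources)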
|
post
|
Authenticate Customer (Sign In). Retrieves the authenticated
customer (a customer that matches the given email/password pair).
If used with an access token for Anonymous Sessions,
all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
If a cart is returned as part of the CustomerSignInResult,
it has been recalculated (it will have up-to-date prices, taxes and discounts,
and invalid line items will have been removed).
|
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ...models.customer import CustomerSignin, CustomerSignInResult
from ...models.error import ErrorResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyLoginRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
# MASKED: post function (lines 30-63)
|
def post(
self,
body: "CustomerSignin",
*,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["CustomerSignInResult"]:
"""Authenticate Customer (Sign In). Retrieves the authenticated
customer (a customer that matches the given email/password pair).
If used with an access token for Anonymous Sessions,
all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
If a cart is returned as part of the CustomerSignInResult,
it has been recalculated (it will have up-to-date prices, taxes and discounts,
and invalid line items will have been removed).
"""
headers = {} if headers is None else headers
response = self._client._post(
endpoint=f"/{self._project_key}/login",
params={},
json=body.serialize(),
headers={"Content-Type": "application/json", **headers},
options=options,
)
if response.status_code in (201, 200):
return CustomerSignInResult.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
| 30
| 63
|
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ...models.customer import CustomerSignin, CustomerSignInResult
from ...models.error import ErrorResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyLoginRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def post(
self,
body: "CustomerSignin",
*,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["CustomerSignInResult"]:
"""Authenticate Customer (Sign In). Retrieves the authenticated
customer (a customer that matches the given email/password pair).
If used with an access token for Anonymous Sessions,
all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
If a cart is returned as part of the CustomerSignInResult,
it has been recalculated (it will have up-to-date prices, taxes and discounts,
and invalid line items will have been removed).
"""
headers = {} if headers is None else headers
response = self._client._post(
endpoint=f"/{self._project_key}/login",
params={},
json=body.serialize(),
headers={"Content-Type": "application/json", **headers},
options=options,
)
if response.status_code in (201, 200):
return CustomerSignInResult.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
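# --- Usage sketch (illustrative, not generated code) ---
# Exercises the builder defined above with a configured BaseClient. The
# CustomerSignin keyword arguments and the result.customer attribute are
# assumptions based on common commercetools SDK conventions, not verified
# against the rest of this code base.
def _demo_login(client: "BaseClient"):
    builder = ByProjectKeyLoginRequestBuilder(project_key="my-project", client=client)
    body = CustomerSignin(email="user@example.com", password="secret")  # assumed fields
    result = builder.post(body=body)
    if result is not None:           # None signals a 404 response
        return result.customer       # assumed attribute of CustomerSignInResult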
|
__init__
|
:param vpcId: VPC ID of the private network
:param subnetId: subnet ID
:param instanceVersion: ES version; currently 5.6.9 and 6.5.4 are supported
:param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores and hyphens, must start with a letter, and must not exceed 32 characters
:param azId: availability zone; for the zone codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions
:param instanceClass: instance specification; for the specification codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications
:param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 if supported, leave empty if not
:param dedicatedMaster: (Optional) whether the cluster includes dedicated master nodes; defaults to false
:param coordinating: (Optional) whether the cluster includes coordinating nodes; defaults to false
:param autoSnapshot: (Optional) automatic snapshot settings
:param authConfig: (Optional) ES data-plane authentication settings
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class InstanceSpec(object):
# MASKED: __init__ function (lines 22-47)
|
def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
"""
:param vpcId: VPC ID of the private network
:param subnetId: subnet ID
:param instanceVersion: ES version; currently 5.6.9 and 6.5.4 are supported
:param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores and hyphens, must start with a letter, and must not exceed 32 characters
:param azId: availability zone; for the zone codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions
:param instanceClass: instance specification; for the specification codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications
:param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 if supported, leave empty if not
:param dedicatedMaster: (Optional) whether the cluster includes dedicated master nodes; defaults to false
:param coordinating: (Optional) whether the cluster includes coordinating nodes; defaults to false
:param autoSnapshot: (Optional) automatic snapshot settings
:param authConfig: (Optional) ES data-plane authentication settings
"""
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig
| 22
| 47
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class InstanceSpec(object):
def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
"""
:param vpcId: VPC ID of the private network
:param subnetId: subnet ID
:param instanceVersion: ES version; currently 5.6.9 and 6.5.4 are supported
:param instanceName: ES cluster name; must not be empty, may only contain letters, digits, underscores and hyphens, must start with a letter, and must not exceed 32 characters
:param azId: availability zone; for the zone codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions
:param instanceClass: instance specification; for the specification codes see: https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications
:param ipVersion: (Optional) whether IPv6 is supported; set to v4&v6 if supported, leave empty if not
:param dedicatedMaster: (Optional) whether the cluster includes dedicated master nodes; defaults to false
:param coordinating: (Optional) whether the cluster includes coordinating nodes; defaults to false
:param autoSnapshot: (Optional) automatic snapshot settings
:param authConfig: (Optional) ES data-plane authentication settings
"""
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig
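# --- Usage sketch (illustrative, not generated code) ---
# Builds a minimal InstanceSpec; every identifier below is a placeholder,
# not a real VPC, subnet, zone or specification code.
if __name__ == "__main__":
    spec = InstanceSpec(
        vpcId="vpc-xxxxxxxx",
        subnetId="subnet-xxxxxxxx",
        instanceVersion="6.5.4",
        instanceName="demo-es-cluster",
        azId="cn-north-1a",           # placeholder zone code
        instanceClass="es.s1.small",  # placeholder spec code
    )
    print(vars(spec))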
|
tokenize
|
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
|
# coding=utf-8
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
import re
import warnings
warnings.filterwarnings('ignore')
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
# MASKED: tokenize function (lines 296-347)
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
| 296
| 347
|
# coding=utf-8
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
import re
import warnings
warnings.filterwarnings('ignore')
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
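# --- Usage sketch (illustrative, not part of the original module) ---
# Demonstrates the greedy longest-match-first behavior of
# WordpieceTokenizer.tokenize with a toy vocabulary; a real vocabulary
# would come from load_vocab().
if __name__ == "__main__":
    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    tokenizer = WordpieceTokenizer(vocab=toy_vocab)
    print(tokenizer.tokenize("unaffable"))  # ['un', '##aff', '##able']
    print(tokenizer.tokenize("xyzzy"))      # ['[UNK]'] - no pieces match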
|
pick_dump_format
|
Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed).
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""
Class describing simulation configuration object
"""
import os
import shutil
import subprocess
import sys
from collections import OrderedDict
import logging as log
from tabulate import tabulate
from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, RunTest, Deploy
from FlowCfg import FlowCfg
from Modes import BuildModes, Modes, Regressions, RunModes, Tests
from testplanner import testplan_utils
from utils import VERBOSE, find_and_substitute_wildcards
# MASKED: pick_dump_format function (lines 24-36)
def resolve_dump_format(tool, dump):
'''Decide on the correct dumping format
This is called after reading the config file. tool is the chosen tool,
which will always have been resolved by this point. dump is the dumping
format chosen on the command line, or None if the user did not choose one.
'''
assert tool is not None
SUPPORTED_DUMP_FMTS = {
'vcs': ['fsdb', 'vpd'],
'xcelium': ['fsdb', 'shm', 'vpd']
}
# Look up which dumping formats the tool supports
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if dump is not None:
# If the user has specified their preferred dumping format, use it. As
# a sanity check, error out if the chosen tool doesn't support the
# format, but only if we know about the tool. If not, we'll just assume
# they know what they're doing.
if fmts is not None and dump not in fmts:
log.error('Chosen tool ({}) does not support wave '
'dumping format {!r}.'
.format(tool, dump))
sys.exit(1)
return dump
# If the user hasn't specified a dumping format, but has asked for waves,
# we need to decide on a format for them. If fmts is None, we don't know
# about this tool. Maybe it's a new simulator, in which case, default to
# VPD and hope for the best.
if not fmts:
return 'vpd'
return pick_dump_format(fmts)
class SimCfg(FlowCfg):
"""Simulation configuration object
A simulation configuration class holds key information required for building a DV
regression framework.
"""
def __init__(self, flow_cfg_file, proj_root, args):
super().__init__(flow_cfg_file, proj_root, args)
# Options set from command line
self.tool = args.tool
self.build_opts = []
self.build_opts.extend(args.build_opts)
self.en_build_modes = args.build_modes.copy()
self.run_opts = []
self.run_opts.extend(args.run_opts)
self.en_run_modes = []
self.en_run_modes.extend(args.run_modes)
self.build_unique = args.build_unique
self.build_only = args.build_only
self.run_only = args.run_only
self.reseed_ovrd = args.reseed
self.reseed_multiplier = args.reseed_multiplier
self.waves = args.waves
self.max_waves = args.max_waves
self.cov = args.cov
self.cov_merge_previous = args.cov_merge_previous
self.profile = args.profile or '(cfg uses profile without --profile)'
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
self.verbosity = "{" + args.verbosity + "}"
self.verbose = args.verbose
self.dry_run = args.dry_run
self.map_full_testplan = args.map_full_testplan
# Disable cov if --build-only is passed.
if self.build_only:
self.cov = False
# Set default sim modes for unpacking
if self.waves is True:
self.en_build_modes.append("waves")
if self.cov is True:
self.en_build_modes.append("cov")
if args.profile is not None:
self.en_build_modes.append("profile")
if self.xprop_off is not True:
self.en_build_modes.append("xprop")
# Options built from cfg_file files
self.project = ""
self.flow = ""
self.flow_makefile = ""
self.build_dir = ""
self.run_dir = ""
self.sw_build_dir = ""
self.pass_patterns = []
self.fail_patterns = []
self.name = ""
self.dut = ""
self.tb = ""
self.testplan = ""
self.fusesoc_core = ""
self.ral_spec = ""
self.build_modes = []
self.run_modes = []
self.regressions = []
# Options from tools - for building and running tests
self.build_cmd = ""
self.flist_gen_cmd = ""
self.flist_gen_opts = []
self.flist_file = ""
self.run_cmd = ""
# Generated data structures
self.links = {}
self.build_list = []
self.run_list = []
self.cov_merge_deploy = None
self.cov_report_deploy = None
self.results_summary = OrderedDict()
# If is_master_cfg is set, then each cfg will have its own cov_deploy.
# Maintain an array of those in cov_deploys.
self.cov_deploys = []
# Parse the cfg_file file tree
self.parse_flow_cfg(flow_cfg_file)
self._post_parse_flow_cfg()
# Choose a dump format now. Note that this has to happen after parsing
# the configuration format because our choice might depend on the
# chosen tool.
self.dump_fmt = (resolve_dump_format(self.tool, args.dump)
if self.waves else 'none')
# If build_unique is set, then add current timestamp to uniquify it
if self.build_unique:
self.build_dir += "_" + self.timestamp
# Process overrides before substituting the wildcards.
self._process_overrides()
# Make substitutions, while ignoring the following wildcards
# TODO: Find a way to set these in sim cfg instead
ignored_wildcards = [
"build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq",
"cov_db_dirs", "sw_test", "sw_test_is_prebuilt", "sw_build_device"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
ignored_wildcards,
self.is_master_cfg)
# Set the title for simulation results.
self.results_title = self.name.upper() + " Simulation Results"
# Stuff below only pertains to individual cfg (not master cfg)
# or individual selected cfgs (if select_cfgs is configured via command line)
# TODO: find a better way to support select_cfgs
if not self.is_master_cfg and (not self.select_cfgs or
self.name in self.select_cfgs):
# If self.tool is None at this point, there was no --tool argument on
# the command line, and there is no default tool set in the config
# file. That's ok if this is a master config (where the
# sub-configurations can choose tools themselves), but not otherwise.
if self.tool is None:
log.error('Config file does not specify a default tool, '
'and there was no --tool argument on the command line.')
sys.exit(1)
# Print info:
log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path)
# Set directories with links for ease of debug / triage.
self.links = {
"D": self.scratch_path + "/" + "dispatched",
"P": self.scratch_path + "/" + "passed",
"F": self.scratch_path + "/" + "failed",
"K": self.scratch_path + "/" + "killed"
}
# Use the default build mode for tests that do not specify it
if not hasattr(self, "build_mode"):
setattr(self, "build_mode", "default")
self._process_exports()
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions, only if not a master cfg obj
self._create_objects()
# Post init checks
self.__post_init__()
def __post_init__(self):
# Run some post init checks
super().__post_init__()
def kill(self):
'''kill running processes and jobs gracefully
'''
super().kill()
for item in self.cov_deploys:
item.kill()
# Purge the output directories. This operates on self.
def _purge(self):
if self.scratch_path:
try:
log.info("Purging scratch path %s", self.scratch_path)
os.system("/bin/rm -rf " + self.scratch_path)
except IOError:
log.error('Failed to purge scratch directory %s',
self.scratch_path)
def _create_objects(self):
# Create build and run modes objects
self.build_modes = Modes.create_modes(BuildModes, self.build_modes)
self.run_modes = Modes.create_modes(RunModes, self.run_modes)
# Walk through build modes enabled on the CLI and append the opts
for en_build_mode in self.en_build_modes:
build_mode_obj = Modes.find_mode(en_build_mode, self.build_modes)
if build_mode_obj is not None:
self.build_opts.extend(build_mode_obj.build_opts)
self.run_opts.extend(build_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_build_mode)
sys.exit(1)
# Walk through run modes enabled on the CLI and append the opts
for en_run_mode in self.en_run_modes:
run_mode_obj = Modes.find_mode(en_run_mode, self.run_modes)
if run_mode_obj is not None:
self.run_opts.extend(run_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_run_mode)
sys.exit(1)
# Create tests from given list of items
tests = Tests.create_tests(getattr(self, "tests"), self)
setattr(self, "tests", tests)
# Regressions
# Parse testplan if provided.
if self.testplan != "":
self.testplan = testplan_utils.parse_testplan(self.testplan)
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Create regressions
regressions = Regressions.create_regressions(
getattr(self, "regressions"), self, tests)
setattr(self, "regressions", regressions)
def _print_list(self):
for list_item in self.list_items:
log.info("---- List of %s in %s ----", list_item, self.name)
if hasattr(self, list_item):
items = getattr(self, list_item)
for item in items:
log.info(item)
else:
log.error("Item %s does not exist!", list_item)
def _create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
# objects.
# Allow multiple regressions to run as long as they do not enable
# sim_modes or run_modes
def get_overlapping_tests(tests, run_list_names):
overlapping_tests = []
for test in tests:
if test.name in run_list_names:
overlapping_tests.append(test)
return overlapping_tests
def prune_items(items, marked_items):
pruned_items = []
for item in items:
if item not in marked_items:
pruned_items.append(item)
return pruned_items
# Check if there are items to run
if self.items == []:
log.error(
"No items provided for running this simulation / regression")
sys.exit(1)
items_list = self.items
run_list_names = []
marked_items = []
# Process regressions first
for regression in self.regressions:
if regression.name in items_list:
overlapping_tests = get_overlapping_tests(
regression.tests, run_list_names)
if overlapping_tests != []:
log.error(
"Regression \"%s\" added for run contains tests that overlap with "
"other regressions added. This can result in conflicting "
"build / run_opts to be set causing unexpected results.",
regression.name)
sys.exit(1)
self.run_list.extend(regression.tests)
# Merge regression's build and run opts with its tests and their
# build_modes
regression.merge_regression_opts()
run_list_names.extend(regression.test_names)
marked_items.append(regression.name)
items_list = prune_items(items_list, marked_items)
# Process individual tests
for test in self.tests:
if test.name in items_list:
overlapping_tests = get_overlapping_tests([test],
run_list_names)
if overlapping_tests == []:
self.run_list.append(test)
run_list_names.append(test.name)
marked_items.append(test.name)
items_list = prune_items(items_list, marked_items)
# Merge the global build and run opts
Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
# Check if all items have been processed
if items_list != []:
log.error(
"The items %s added for run were not found in \n%s!\n "
"Use the --list switch to see a list of available "
"tests / regressions.", items_list, self.flow_cfg_file)
# Process reseed override and create the build_list
build_list_names = []
for test in self.run_list:
# Override reseed if available.
if self.reseed_ovrd is not None:
test.reseed = self.reseed_ovrd
# Apply reseed multiplier if set on the command line.
test.reseed *= self.reseed_multiplier
# Create the unique set of builds needed.
if test.build_mode.name not in build_list_names:
self.build_list.append(test.build_mode)
build_list_names.append(test.build_mode.name)
def _create_dirs(self):
'''Create initial set of directories
'''
# Invoking system calls has a performance penalty.
# Construct a single command line chained with '&&' to invoke
# the system call only once, rather than multiple times.
create_link_dirs_cmd = ""
for link in self.links.keys():
create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && "
create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && "
create_link_dirs_cmd += " true"
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error("Error running when running the cmd \"%s\"",
create_link_dirs_cmd)
sys.exit(1)
def _create_deploy_objects(self):
'''Create deploy objects from the build and run lists.
'''
# Create the build and run list first
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if self.build_only is False:
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
# Create cov_merge and cov_report objects
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
# Generate reports only if merge was successful; add it as a dependency
# of merge.
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
# Create initial set of directories before kicking off the regression.
self._create_dirs()
def create_deploy_objects(self):
'''Public facing API for _create_deploy_objects().
'''
super().create_deploy_objects()
# Also, create cov_deploys
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy)
# deploy additional commands as needed. We do this separately for coverage
# since that needs to happen at the end.
def deploy_objects(self):
'''This is a public facing API, so we use "self.cfgs" instead of self.
'''
# Invoke the base class method to run the regression.
super().deploy_objects()
# If coverage is enabled, then deploy the coverage tasks.
if self.cov:
Deploy.deploy(self.cov_deploys)
def _cov_analyze(self):
'''Use the last regression coverage data to open up the GUI tool to
analyze the coverage.
'''
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy]
def cov_analyze(self):
'''Public facing API for analyzing coverage.
'''
for item in self.cfgs:
item._cov_analyze()
def _gen_results(self):
'''
The function is called after the regression has completed. It collates the
status of all run targets and generates a dict. It parses the testplan and
maps the generated result to the testplan entries to generate a final table
(list). It also prints the full list of failures for debug / triage. If cov
is enabled, then the summary coverage report is also generated. The final
result is in markdown format.
'''
# TODO: add support for html
def retrieve_result(name, results):
for item in results:
if name == item["name"]:
return item
return None
def gen_results_sub(items, results, fail_msgs):
'''
Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total as a list of dicts.
This is populated for all tests. The number of passing and total is
in reference to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation.
'''
for item in items:
if item.status == "F":
fail_msgs += item.fail_msg
# Generate results table for runs.
if item.target == "run":
result = retrieve_result(item.name, results)
if result is None:
result = {"name": item.name, "passing": 0, "total": 0}
results.append(result)
if item.status == "P":
result["passing"] += 1
result["total"] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results,
fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ""
deployed_items = self.deploy
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items,
regr_results, fail_msgs)
# Add title if there are indeed failures
if fail_msgs != "":
fail_msgs = "\n## List of Failures\n" + fail_msgs
self.errors_seen = True
# Generate results table for runs.
results_str = "## " + self.results_title + "\n"
results_str += "### " + self.timestamp_long + "\n"
# Add path to testplan.
if hasattr(self, "testplan_doc_path"):
testplan = "https://" + self.doc_server + '/' + getattr(
self, "testplan_doc_path")
else:
testplan = "https://" + self.doc_server + '/' + self.rel_path
testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan")
results_str += "### [Testplan](" + testplan + ")\n"
results_str += "### Simulator: " + self.tool.upper() + "\n\n"
if regr_results == []:
results_str += "No results to display.\n"
else:
# TODO: check if testplan is not null?
# Map regr results to the testplan entries.
results_str += self.testplan.results_table(
regr_results=regr_results,
map_full_testplan=self.map_full_testplan)
results_str += "\n"
self.results_summary = self.testplan.results_summary
# Append coverage results if coverage was enabled.
if self.cov:
if self.cov_report_deploy.status == "P":
results_str += "\n## Coverage Results\n"
# Link the dashboard page using "cov_report_page" value.
if hasattr(self, "cov_report_page"):
results_str += "\n### [Coverage Dashboard]"
results_str += "({})\n\n".format(
getattr(self, "cov_report_page"))
results_str += self.cov_report_deploy.cov_results
self.results_summary[
"Coverage"] = self.cov_report_deploy.cov_total
else:
self.results_summary["Coverage"] = "--"
# append link of detail result to block name
self.results_summary["Name"] = self._get_results_page_link(
self.results_summary["Name"])
# Append failures for triage
self.results_md = results_str + fail_msgs
results_str += fail_msgs
# Write results to the scratch area
results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
f = open(results_file, 'w')
f.write(self.results_md)
f.close()
# Return only the tables
log.info("[results page]: [%s] [%s]", self.name, results_file)
return results_str
def gen_results_summary(self):
# sim summary result has 5 columns from each SimCfg.results_summary
header = ["Name", "Passing", "Total", "Pass Rate"]
if self.cov:
header.append('Coverage')
table = [header]
colalign = ("center", ) * len(header)
for item in self.cfgs:
row = []
for title in item.results_summary:
row.append(item.results_summary[title])
if row == []:
continue
table.append(row)
self.results_summary_md = "## " + self.results_title + " (Summary)\n"
self.results_summary_md += "### " + self.timestamp_long + "\n"
self.results_summary_md += tabulate(table,
headers="firstrow",
tablefmt="pipe",
colalign=colalign)
print(self.results_summary_md)
return self.results_summary_md
def _publish_results(self):
'''Publish coverage results to the opentitan web server.'''
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(
self.results_server_prefix, self.results_server_url_prefix)
log.info("Publishing coverage results to %s",
results_server_dir_url)
cmd = (self.results_server_cmd + " -m cp -R " +
self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e,
str(cmd))
|
def pick_dump_format(fmts):
'''Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed).
'''
assert fmts
fmt = fmts[0]
if fmt == 'fsdb' and not shutil.which('verdi'):
return pick_dump_format(fmts[1:])
return fmt
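# --- Usage sketch (illustrative, not part of the original file) ---
# With VCS the supported list is ['fsdb', 'vpd']: pick_dump_format returns
# 'fsdb' when Verdi is on PATH and falls back to 'vpd' otherwise. Assumes
# shutil is imported as in the surrounding module.
if __name__ == "__main__":
    print(pick_dump_format(['fsdb', 'vpd']))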
| 24
| 36
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""
Class describing simulation configuration object
"""
import os
import shutil
import subprocess
import sys
from collections import OrderedDict
import logging as log
from tabulate import tabulate
from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, RunTest, Deploy
from FlowCfg import FlowCfg
from Modes import BuildModes, Modes, Regressions, RunModes, Tests
from testplanner import testplan_utils
from utils import VERBOSE, find_and_substitute_wildcards
def pick_dump_format(fmts):
'''Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed).
'''
assert fmts
fmt = fmts[0]
if fmt == 'fsdb' and not shutil.which('verdi'):
return pick_dump_format(fmts[1:])
return fmt
def resolve_dump_format(tool, dump):
'''Decide on the correct dumping format
This is called after reading the config file. tool is the chosen tool,
which will always have been resolved by this point. dump is the dumping
format chosen on the command line, or None if the user did not choose one.
'''
assert tool is not None
SUPPORTED_DUMP_FMTS = {
'vcs': ['fsdb', 'vpd'],
'xcelium': ['fsdb', 'shm', 'vpd']
}
# Look up which dumping formats the tool supports
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if dump is not None:
# If the user has specified their preferred dumping format, use it. As
# a sanity check, error out if the chosen tool doesn't support the
# format, but only if we know about the tool. If not, we'll just assume
# they know what they're doing.
if fmts is not None and dump not in fmts:
log.error('Chosen tool ({}) does not support wave '
'dumping format {!r}.'
.format(tool, dump))
sys.exit(1)
return dump
# If the user hasn't specified a dumping format, but has asked for waves,
# we need to decide on a format for them. If fmts is None, we don't know
# about this tool. Maybe it's a new simulator, in which case, default to
# VPD and hope for the best.
if not fmts:
return 'vpd'
return pick_dump_format(fmts)
class SimCfg(FlowCfg):
"""Simulation configuration object
A simulation configuration class holds key information required for building a DV
regression framework.
"""
def __init__(self, flow_cfg_file, proj_root, args):
super().__init__(flow_cfg_file, proj_root, args)
# Options set from command line
self.tool = args.tool
self.build_opts = []
self.build_opts.extend(args.build_opts)
self.en_build_modes = args.build_modes.copy()
self.run_opts = []
self.run_opts.extend(args.run_opts)
self.en_run_modes = []
self.en_run_modes.extend(args.run_modes)
self.build_unique = args.build_unique
self.build_only = args.build_only
self.run_only = args.run_only
self.reseed_ovrd = args.reseed
self.reseed_multiplier = args.reseed_multiplier
self.waves = args.waves
self.max_waves = args.max_waves
self.cov = args.cov
self.cov_merge_previous = args.cov_merge_previous
self.profile = args.profile or '(cfg uses profile without --profile)'
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
self.verbosity = "{" + args.verbosity + "}"
self.verbose = args.verbose
self.dry_run = args.dry_run
self.map_full_testplan = args.map_full_testplan
# Disable cov if --build-only is passed.
if self.build_only:
self.cov = False
# Set default sim modes for unpacking
if self.waves is True:
self.en_build_modes.append("waves")
if self.cov is True:
self.en_build_modes.append("cov")
if args.profile is not None:
self.en_build_modes.append("profile")
if self.xprop_off is not True:
self.en_build_modes.append("xprop")
# Options built from cfg_file files
self.project = ""
self.flow = ""
self.flow_makefile = ""
self.build_dir = ""
self.run_dir = ""
self.sw_build_dir = ""
self.pass_patterns = []
self.fail_patterns = []
self.name = ""
self.dut = ""
self.tb = ""
self.testplan = ""
self.fusesoc_core = ""
self.ral_spec = ""
self.build_modes = []
self.run_modes = []
self.regressions = []
# Options from tools - for building and running tests
self.build_cmd = ""
self.flist_gen_cmd = ""
self.flist_gen_opts = []
self.flist_file = ""
self.run_cmd = ""
# Generated data structures
self.links = {}
self.build_list = []
self.run_list = []
self.cov_merge_deploy = None
self.cov_report_deploy = None
self.results_summary = OrderedDict()
# If is_master_cfg is set, then each cfg will have its own cov_deploy.
# Maintain an array of those in cov_deploys.
self.cov_deploys = []
# Parse the cfg_file file tree
self.parse_flow_cfg(flow_cfg_file)
self._post_parse_flow_cfg()
# Choose a dump format now. Note that this has to happen after parsing
# the configuration format because our choice might depend on the
# chosen tool.
self.dump_fmt = (resolve_dump_format(self.tool, args.dump)
if self.waves else 'none')
# If build_unique is set, then add current timestamp to uniquify it
if self.build_unique:
self.build_dir += "_" + self.timestamp
# Process overrides before substituting the wildcards.
self._process_overrides()
# Make substitutions, while ignoring the following wildcards
# TODO: Find a way to set these in sim cfg instead
ignored_wildcards = [
"build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq",
"cov_db_dirs", "sw_test", "sw_test_is_prebuilt", "sw_build_device"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
ignored_wildcards,
self.is_master_cfg)
# Set the title for simulation results.
self.results_title = self.name.upper() + " Simulation Results"
# Stuff below only pertains to individual cfg (not master cfg)
# or individual selected cfgs (if select_cfgs is configured via command line)
# TODO: find a better way to support select_cfgs
if not self.is_master_cfg and (not self.select_cfgs or
self.name in self.select_cfgs):
# If self.tool is None at this point, there was no --tool argument on
# the command line, and there is no default tool set in the config
# file. That's ok if this is a master config (where the
# sub-configurations can choose tools themselves), but not otherwise.
if self.tool is None:
log.error('Config file does not specify a default tool, '
'and there was no --tool argument on the command line.')
sys.exit(1)
# Print info:
log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path)
# Set directories with links for ease of debug / triage.
self.links = {
"D": self.scratch_path + "/" + "dispatched",
"P": self.scratch_path + "/" + "passed",
"F": self.scratch_path + "/" + "failed",
"K": self.scratch_path + "/" + "killed"
}
# Use the default build mode for tests that do not specify it
if not hasattr(self, "build_mode"):
setattr(self, "build_mode", "default")
self._process_exports()
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions, only if not a master cfg obj
self._create_objects()
# Post init checks
self.__post_init__()
def __post_init__(self):
# Run some post init checks
super().__post_init__()
def kill(self):
'''kill running processes and jobs gracefully
'''
super().kill()
for item in self.cov_deploys:
item.kill()
# Purge the output directories. This operates on self.
def _purge(self):
if self.scratch_path:
try:
log.info("Purging scratch path %s", self.scratch_path)
os.system("/bin/rm -rf " + self.scratch_path)
except IOError:
log.error('Failed to purge scratch directory %s',
self.scratch_path)
def _create_objects(self):
# Create build and run modes objects
self.build_modes = Modes.create_modes(BuildModes, self.build_modes)
self.run_modes = Modes.create_modes(RunModes, self.run_modes)
# Walk through build modes enabled on the CLI and append the opts
for en_build_mode in self.en_build_modes:
build_mode_obj = Modes.find_mode(en_build_mode, self.build_modes)
if build_mode_obj is not None:
self.build_opts.extend(build_mode_obj.build_opts)
self.run_opts.extend(build_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_build_mode)
sys.exit(1)
# Walk through run modes enabled on the CLI and append the opts
for en_run_mode in self.en_run_modes:
run_mode_obj = Modes.find_mode(en_run_mode, self.run_modes)
if run_mode_obj is not None:
self.run_opts.extend(run_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_run_mode)
sys.exit(1)
# Create tests from given list of items
tests = Tests.create_tests(getattr(self, "tests"), self)
setattr(self, "tests", tests)
# Regressions
# Parse testplan if provided.
if self.testplan != "":
self.testplan = testplan_utils.parse_testplan(self.testplan)
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Create regressions
regressions = Regressions.create_regressions(
getattr(self, "regressions"), self, tests)
setattr(self, "regressions", regressions)
def _print_list(self):
for list_item in self.list_items:
log.info("---- List of %s in %s ----", list_item, self.name)
if hasattr(self, list_item):
items = getattr(self, list_item)
for item in items:
log.info(item)
else:
log.error("Item %s does not exist!", list_item)
def _create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
# objects.
# Allow multiple regressions to run as long as they do not enable
# sim_modes or run_modes
def get_overlapping_tests(tests, run_list_names):
overlapping_tests = []
for test in tests:
if test.name in run_list_names:
overlapping_tests.append(test)
return overlapping_tests
def prune_items(items, marked_items):
pruned_items = []
for item in items:
if item not in marked_items:
pruned_items.append(item)
return pruned_items
# Check if there are items to run
if self.items == []:
log.error(
"No items provided for running this simulation / regression")
sys.exit(1)
items_list = self.items
run_list_names = []
marked_items = []
# Process regressions first
for regression in self.regressions:
if regression.name in items_list:
overlapping_tests = get_overlapping_tests(
regression.tests, run_list_names)
if overlapping_tests != []:
log.error(
"Regression \"%s\" added for run contains tests that overlap with "
"other regressions added. This can result in conflicting "
"build / run_opts to be set causing unexpected results.",
regression.name)
sys.exit(1)
self.run_list.extend(regression.tests)
# Merge regression's build and run opts with its tests and their
# build_modes
regression.merge_regression_opts()
run_list_names.extend(regression.test_names)
marked_items.append(regression.name)
items_list = prune_items(items_list, marked_items)
# Process individual tests
for test in self.tests:
if test.name in items_list:
overlapping_tests = get_overlapping_tests([test],
run_list_names)
if overlapping_tests == []:
self.run_list.append(test)
run_list_names.append(test.name)
marked_items.append(test.name)
items_list = prune_items(items_list, marked_items)
# Merge the global build and run opts
Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
# Check if all items have been processed
if items_list != []:
log.error(
"The items %s added for run were not found in \n%s!\n "
"Use the --list switch to see a list of available "
"tests / regressions.", items_list, self.flow_cfg_file)
# Process reseed override and create the build_list
build_list_names = []
for test in self.run_list:
# Override reseed if available.
if self.reseed_ovrd is not None:
test.reseed = self.reseed_ovrd
# Apply reseed multiplier if set on the command line.
test.reseed *= self.reseed_multiplier
# Create the unique set of builds needed.
if test.build_mode.name not in build_list_names:
self.build_list.append(test.build_mode)
build_list_names.append(test.build_mode.name)
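# Illustrative only (hypothetical numbers): with test.reseed = 5 in the cfg,
# --reseed 2 and --reseed_multiplier 3 on the command line, the loop above
# schedules 2 * 3 = 6 runs for that test, since the override replaces the
# cfg value before the multiplier is applied.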
def _create_dirs(self):
'''Create initial set of directories
'''
# Invoking system calls has a performance penalty.
# Construct a single command line chained with '&&' to invoke
# the system call only once, rather than multiple times.
create_link_dirs_cmd = ""
for link in self.links.keys():
create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && "
create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && "
create_link_dirs_cmd += " true"
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error("Error running when running the cmd \"%s\"",
create_link_dirs_cmd)
sys.exit(1)
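# A minimal sketch of the single-system-call pattern used above
# (illustrative only; 'links' stands in for self.links):
#
#   links = {"D": "scratch/dispatched", "P": "scratch/passed"}
#   cmd = ""
#   for path in links.values():
#       cmd += "/bin/rm -rf " + path + " && mkdir -p " + path + " && "
#   os.system(cmd + "true")   # one shell invocation for all directories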
def _create_deploy_objects(self):
'''Create deploy objects from the build and run lists.
'''
# Create the build and run list first
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if self.build_only is False:
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
# Create cov_merge and cov_report objects
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
# Generate reports only if merge was successful; add it as a dependency
# of merge.
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
# Create initial set of directories before kicking off the regression.
self._create_dirs()
def create_deploy_objects(self):
'''Public facing API for _create_deploy_objects().
'''
super().create_deploy_objects()
# Also, create cov_deploys
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy)
# Deploy additional commands as needed. We do this separately for coverage
# since that needs to happen at the end.
def deploy_objects(self):
'''This is a public facing API, so we use "self.cfgs" instead of self.
'''
# Invoke the base class method to run the regression.
super().deploy_objects()
# If coverage is enabled, then deploy the coverage tasks.
if self.cov:
Deploy.deploy(self.cov_deploys)
def _cov_analyze(self):
'''Use the last regression coverage data to open up the GUI tool to
analyze the coverage.
'''
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy]
def cov_analyze(self):
'''Public facing API for analyzing coverage.
'''
for item in self.cfgs:
item._cov_analyze()
def _gen_results(self):
'''
The function is called after the regression has completed. It collates the
status of all run targets and generates a dict. It parses the testplan and
maps the generated result to the testplan entries to generate a final table
(list). It also prints the full list of failures for debug / triage. If cov
is enabled, then the summary coverage report is also generated. The final
result is in markdown format.
'''
# TODO: add support for html
def retrieve_result(name, results):
for item in results:
if name == item["name"]:
return item
return None
def gen_results_sub(items, results, fail_msgs):
'''
Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total - as a list of dicts.
This is populated for all tests. The passing and total counts refer
to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation.
'''
for item in items:
if item.status == "F":
fail_msgs += item.fail_msg
# Generate results table for runs.
if item.target == "run":
result = retrieve_result(item.name, results)
if result is None:
result = {"name": item.name, "passing": 0, "total": 0}
results.append(result)
if item.status == "P":
result["passing"] += 1
result["total"] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results,
fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ""
deployed_items = self.deploy
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items,
regr_results, fail_msgs)
# Add title if there are indeed failures
if fail_msgs != "":
fail_msgs = "\n## List of Failures\n" + fail_msgs
self.errors_seen = True
# Generate results table for runs.
results_str = "## " + self.results_title + "\n"
results_str += "### " + self.timestamp_long + "\n"
# Add path to testplan.
if hasattr(self, "testplan_doc_path"):
testplan = "https://" + self.doc_server + '/' + getattr(
self, "testplan_doc_path")
else:
testplan = "https://" + self.doc_server + '/' + self.rel_path
testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan")
results_str += "### [Testplan](" + testplan + ")\n"
results_str += "### Simulator: " + self.tool.upper() + "\n\n"
if regr_results == []:
results_str += "No results to display.\n"
else:
# TODO: check if testplan is not null?
# Map regr results to the testplan entries.
results_str += self.testplan.results_table(
regr_results=regr_results,
map_full_testplan=self.map_full_testplan)
results_str += "\n"
self.results_summary = self.testplan.results_summary
# Append coverage results if coverage was enabled.
if self.cov:
if self.cov_report_deploy.status == "P":
results_str += "\n## Coverage Results\n"
# Link the dashboard page using "cov_report_page" value.
if hasattr(self, "cov_report_page"):
results_str += "\n### [Coverage Dashboard]"
results_str += "({})\n\n".format(
getattr(self, "cov_report_page"))
results_str += self.cov_report_deploy.cov_results
self.results_summary[
"Coverage"] = self.cov_report_deploy.cov_total
else:
self.results_summary["Coverage"] = "--"
# Append a link to the detailed results page under the block name.
self.results_summary["Name"] = self._get_results_page_link(
self.results_summary["Name"])
# Append failures for triage
self.results_md = results_str + fail_msgs
results_str += fail_msgs
# Write results to the scratch area
results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
f = open(results_file, 'w')
f.write(self.results_md)
f.close()
# Return only the tables
log.info("[results page]: [%s] [%s]", self.name, results_file)
return results_str
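# Illustrative only (hypothetical numbers): gen_results_sub() above builds
# per-test dicts such as
#
#   [{"name": "uart_smoke", "passing": 9, "total": 10}]
#
# which Testplan::results_table() then maps onto the testplan entries.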
def gen_results_summary(self):
# The summary row has 4 columns (plus Coverage, if enabled), taken from
# each SimCfg.results_summary.
header = ["Name", "Passing", "Total", "Pass Rate"]
if self.cov:
header.append('Coverage')
table = [header]
colalign = ("center", ) * len(header)
for item in self.cfgs:
row = []
for title in item.results_summary:
row.append(item.results_summary[title])
if row == []:
continue
table.append(row)
self.results_summary_md = "## " + self.results_title + " (Summary)\n"
self.results_summary_md += "### " + self.timestamp_long + "\n"
self.results_summary_md += tabulate(table,
headers="firstrow",
tablefmt="pipe",
colalign=colalign)
print(self.results_summary_md)
return self.results_summary_md
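# Illustrative only: a toy version of the tabulate() call above
# (hypothetical data; widths are approximate):
#
#   table = [["Name", "Passing", "Total", "Pass Rate"],
#            ["uart", "10", "10", "100.00 %"]]
#   tabulate(table, headers="firstrow", tablefmt="pipe",
#            colalign=("center",) * 4)
#
# renders a markdown pipe table with centered columns, roughly:
#
#   |  Name  |  Passing  |  Total  |  Pass Rate  |
#   |:------:|:---------:|:-------:|:-----------:|
#   |  uart  |    10     |   10    |  100.00 %   |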
def _publish_results(self):
'''Publish coverage results to the opentitan web server.'''
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(
self.results_server_prefix, self.results_server_url_prefix)
log.info("Publishing coverage results to %s",
results_server_dir_url)
cmd = (self.results_server_cmd + " -m cp -R " +
self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e,
str(cmd))
|
_get_fixed_ip_address
|
Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
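# Illustrative only (hypothetical config values): with
# [neutron] auth_strategy = noauth and url = http://127.0.0.1:9696, the
# function above reduces to
#
#   clientv20.Client(timeout=30, retries=3, insecure=False, ca_cert=None,
#                    endpoint_url='http://127.0.0.1:9696',
#                    auth_strategy='noauth')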
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs.
If the value is None, will get the list of ports from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to work around bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have set up sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
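# Illustrative only (hypothetical values): 'options' uses the
# extra_dhcp_opts shape shown in the docstring above, and 'vifs' maps
# Ironic port UUIDs to Neutron port UUIDs:
#
#   options = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}]
#   vifs = {'ironic-port-uuid': 'neutron-port-uuid'}
#   api.update_dhcp_opts(task, options, vifs)
#
# If every port update fails, FailedToUpdateDHCPOptOnPort is raised;
# partial failures are only logged as a warning.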
# MASKED: _get_fixed_ip_address function (lines 203-238)
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
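# Illustrative only (hypothetical UUIDs): the returned mapping mirrors
# get_node_vif_ids(), e.g. {'ironic-port-uuid': 'neutron-port-id'}.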
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to rollback cleaning port '
'changes for node %s') % task.node.uuid)
|
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
| 203
| 238
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs.
If the value is None, will get the list of ports from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to work around bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have set up sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
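# A minimal usage sketch (illustrative only; the UUID is hypothetical):
#
#   client = _build_client(token=None)
#   ip = api._get_fixed_ip_address(
#       '11111111-2222-3333-4444-555555555555', client)
#
# The first fixed_ip on the port is returned when it is a valid IPv4
# address; otherwise InvalidIPv4Address or FailedToGetIPAddressOnPort
# is raised.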
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
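# Illustrative only: the lookup above chains Ironic port UUID -> Neutron
# VIF -> fixed IP (hypothetical identifiers):
#
#   vifs = network.get_node_vif_ids(task)  # {ironic_port_uuid: neutron_port_uuid}
#   ip = self._get_fixed_ip_address(vifs[port_uuid], client)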
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to rollback cleaning port '
'changes for node %s') % task.node.uuid)
|
_get_port_ip_address
|
Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs.
If the value is None, will get the list of ports from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to work around bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have set up sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
# MASKED: _get_port_ip_address function (lines 240-261)
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to rollback cleaning port '
'changes for node %s') % task.node.uuid)
|
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
| 240
| 261
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs.
If vifs is None, the list of ports will be taken from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to workaround bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have setup sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to roll back cleaning port '
'changes for node %s'), task.node.uuid)
|
get_ip_addresses
|
Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs.
If vifs is None, the list of ports will be taken from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to workaround bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have setup sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
# MASKED: get_ip_addresses function (lines 263-287)
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to roll back cleaning port '
'changes for node %s'), task.node.uuid)
|
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
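# Illustrative return value (addresses are placeholders): one entry per port
# whose IP resolved cleanly, e.g. ['192.0.2.10', '192.0.2.11']; ports that
# failed are collected in `failures`, logged, and skipped.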
| 263
| 287
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs.
If vifs is None, the list of ports will be taken from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to workaround bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have setup sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % {'node': task.node.uuid})
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to roll back cleaning port '
'changes for node %s'), task.node.uuid)
|
enumerate_cpu_counts
|
Return the list of CPU counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10].
|
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzbuild.py -- script to run materialized benchmarks
import argparse
import csv
import itertools
import multiprocessing
import os
import pathlib
import subprocess
import sys
import typing
import uuid
import webbrowser
def mzbuild_tag(git_ref: str) -> str:
if not git_ref:
return git_ref
try:
return (
subprocess.check_output(
["git", "describe", "--exact-match", git_ref], stderr=subprocess.STDOUT
)
.strip()
.decode()
)
except subprocess.CalledProcessError:
unstable_ref = (
subprocess.check_output(["git", "rev-parse", "--verify", git_ref])
.strip()
.decode()
)
return f"unstable-{unstable_ref}"
def mzcompose_location(mz_root: str) -> pathlib.Path:
"""Return the absolute path to mzcompose.
MZ_ROOT is expected to be set via pyactivate.
"""
return pathlib.Path(mz_root, "bin", "mzcompose")
def main(args: argparse.Namespace) -> None:
# Ensure that we are working out of the git directory so that commands, such as git, will work
mz_root = os.environ["MZ_ROOT"]
os.chdir(mz_root)
worker_counts = enumerate_cpu_counts()
if args.no_benchmark_this_checkout:
git_references = args.git_references
else:
git_references = [None, *args.git_references]
if args.verbose:
build_tags = [None, *[mzbuild_tag(ref) for ref in args.git_references]]
print(f"DEBUG: num_iterations={args.num_measurements}")
print(f"DEBUG: worker_counts={worker_counts}")
print(f"DEBUG: mzbuild_tags={build_tags}")
if args.size == "ci":
# Explicitly override the worker counts for the CI benchmark
worker_counts = [1]
setup_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"setup-benchmark-{args.size}",
]
run_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"run-benchmark-{args.size}",
]
field_names = [
"git_revision",
"num_workers",
"iteration",
"seconds_taken",
"rows_per_second",
"grafana_url",
]
results_writer = csv.DictWriter(sys.stdout, field_names)
results_writer.writeheader()
# We use check_output because check_call does not capture output
try:
subprocess.check_output(setup_benchmark, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
if args.web:
try:
web_command = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"web",
f"perf-dash-web",
]
output = subprocess.check_output(web_command, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(f"Failed to open browser to perf-dash:\n{e.output.decode()}")
raise
iterations = range(0, args.num_measurements)
for (iteration, worker_count, git_ref) in itertools.product(
iterations, worker_counts, git_references
):
# Sadly, environment variables are the only way to pass this information into containers
# started by mzcompose
child_env = os.environ.copy()
child_env["MZ_ROOT"] = mz_root
child_env["MZ_WORKERS"] = str(worker_count)
child_env["MZBENCH_ID"] = args.benchmark_id
child_env["MZBUILD_WAIT_FOR_IMAGE"] = "true"
if git_ref:
child_env["MZBENCH_GIT_REF"] = git_ref
child_env["MZBUILD_MATERIALIZED_TAG"] = mzbuild_tag(git_ref)
try:
output = subprocess.check_output(
run_benchmark, env=child_env, stderr=subprocess.STDOUT
)
except (subprocess.CalledProcessError,) as e:
# TODO: Don't exit with error on simple benchmark failure
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
# TODO: Replace parsing output from mzcompose with reading from a well known file or topic
for line in output.decode().splitlines():
if line.startswith("SUCCESS!"):
for token in line.split(" "):
if token.startswith("seconds_taken="):
seconds_taken = token[len("seconds_taken=") :]
elif token.startswith("rows_per_sec="):
rows_per_second = token[len("rows_per_sec=") :]
elif line.startswith("Grafana URL: "):
grafana_url = line[len("Grafana URL: ") :]
results_writer.writerow(
{
"git_revision": git_ref if git_ref else "None",
"num_workers": worker_count,
"iteration": iteration,
"seconds_taken": seconds_taken,
"rows_per_second": rows_per_second,
"grafana_url": grafana_url,
}
)
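# For reference: the parsing loop above assumes benchmark output lines of the
# following shape (inferred from the token handling, not from any mzcompose
# specification):
#
#   SUCCESS! seconds_taken=42 rows_per_sec=123456
#   Grafana URL: https://grafana.example.com/d/abc123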
# MASKED: enumerate_cpu_counts function (lines 175-197)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--benchmark-id",
type=str,
default=str(uuid.uuid4()),
help="Pseudo-unique identifier to use for this benchmark",
)
parser.add_argument(
"-n",
"--num-measurements",
type=int,
default=6,
help="Number of times to repeat each benchmark iteration",
)
parser.add_argument(
"-s",
"--size",
type=str,
default="medium",
choices=["medium", "ci", "large"],
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"--no-benchmark-this-checkout",
action="store_true",
help="Don't benchmark the version of materialized in this checkout",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging output"
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="Open a web browser showing results visualizations",
)
parser.add_argument(
"composition",
type=str,
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"git_references",
type=str,
nargs="*",
help="Materialized builds to test as well, identified by git reference",
)
args = parser.parse_args()
main(args)
|
def enumerate_cpu_counts() -> typing.List[int]:
"""This program prints the number of CPU counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10].
"""
# Reserve 15% for system overhead, then halve to count physical cores only: 0.85 * 0.5 = 0.425
max_cpus = round(multiprocessing.cpu_count() * 0.425)
num_trials = 4
# Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0
worker_counts = [round(i * max_cpus / num_trials) for i in range(num_trials, 0, -1)]
return list(reversed(sorted(set(worker_counts))))
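# Worked example (assuming multiprocessing.cpu_count() returns 56):
#   max_cpus      = round(56 * 0.425)                         -> 24
#   worker_counts = [round(i * 24 / 4) for i in (4, 3, 2, 1)] -> [24, 18, 12, 6]
# which, after dedup and the descending sort, matches the [24, 18, 12, 6]
# documented above.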
| 175
| 197
|
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzbuild.py -- script to run materialized benchmarks
import argparse
import csv
import itertools
import multiprocessing
import os
import pathlib
import subprocess
import sys
import typing
import uuid
import webbrowser
def mzbuild_tag(git_ref: str) -> str:
if not git_ref:
return git_ref
try:
return (
subprocess.check_output(
["git", "describe", "--exact-match", git_ref], stderr=subprocess.STDOUT
)
.strip()
.decode()
)
except subprocess.CalledProcessError:
unstable_ref = (
subprocess.check_output(["git", "rev-parse", "--verify", git_ref])
.strip()
.decode()
)
return f"unstable-{unstable_ref}"
def mzcompose_location(mz_root: str) -> pathlib.Path:
"""Return the absolute path to mzcompose.
MZ_ROOT is expected to be set via pyactivate.
"""
return pathlib.Path(mz_root, "bin", "mzcompose")
def main(args: argparse.Namespace) -> None:
# Ensure that we are working out of the git directory so that commands, such as git, will work
mz_root = os.environ["MZ_ROOT"]
os.chdir(mz_root)
worker_counts = enumerate_cpu_counts()
if args.no_benchmark_this_checkout:
git_references = args.git_references
else:
git_references = [None, *args.git_references]
if args.verbose:
build_tags = [None, *[mzbuild_tag(ref) for ref in args.git_references]]
print(f"DEBUG: num_iterations={args.num_measurements}")
print(f"DEBUG: worker_counts={worker_counts}")
print(f"DEBUG: mzbuild_tags={build_tags}")
if args.size == "ci":
# Explicitly override the worker counts for the CI benchmark
worker_counts = [1]
setup_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"setup-benchmark-{args.size}",
]
run_benchmark = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"run",
f"run-benchmark-{args.size}",
]
field_names = [
"git_revision",
"num_workers",
"iteration",
"seconds_taken",
"rows_per_second",
"grafana_url",
]
results_writer = csv.DictWriter(sys.stdout, field_names)
results_writer.writeheader()
# We use check_output because check_call does not capture output
try:
subprocess.check_output(setup_benchmark, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
if args.web:
try:
web_command = [
mzcompose_location(mz_root),
"--mz-find",
args.composition,
"web",
f"perf-dash-web",
]
output = subprocess.check_output(web_command, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError,) as e:
print(f"Failed to open browser to perf-dash:\n{e.output.decode()}")
raise
iterations = range(0, args.num_measurements)
for (iteration, worker_count, git_ref) in itertools.product(
iterations, worker_counts, git_references
):
# Sadly, environment variables are the only way to pass this information into containers
# started by mzcompose
child_env = os.environ.copy()
child_env["MZ_ROOT"] = mz_root
child_env["MZ_WORKERS"] = str(worker_count)
child_env["MZBENCH_ID"] = args.benchmark_id
child_env["MZBUILD_WAIT_FOR_IMAGE"] = "true"
if git_ref:
child_env["MZBENCH_GIT_REF"] = git_ref
child_env["MZBUILD_MATERIALIZED_TAG"] = mzbuild_tag(git_ref)
try:
output = subprocess.check_output(
run_benchmark, env=child_env, stderr=subprocess.STDOUT
)
except (subprocess.CalledProcessError,) as e:
# TODO: Don't exit with error on simple benchmark failure
print(
f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
)
raise
# TODO: Replace parsing output from mzcompose with reading from a well known file or topic
for line in output.decode().splitlines():
if line.startswith("SUCCESS!"):
for token in line.split(" "):
if token.startswith("seconds_taken="):
seconds_taken = token[len("seconds_taken=") :]
elif token.startswith("rows_per_sec="):
rows_per_second = token[len("rows_per_sec=") :]
elif line.startswith("Grafana URL: "):
grafana_url = line[len("Grafana URL: ") :]
results_writer.writerow(
{
"git_revision": git_ref if git_ref else "None",
"num_workers": worker_count,
"iteration": iteration,
"seconds_taken": seconds_taken,
"rows_per_second": rows_per_second,
"grafana_url": grafana_url,
}
)
def enumerate_cpu_counts() -> typing.List[int]:
"""This program prints the number of CPU counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10].
"""
# Reserve 15% for system overhead, then halve to count physical cores only: 0.85 * 0.5 = 0.425
max_cpus = round(multiprocessing.cpu_count() * 0.425)
num_trials = 4
# Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0
worker_counts = [round(i * max_cpus / num_trials) for i in range(num_trials, 0, -1)]
return list(reversed(sorted(set(worker_counts))))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--benchmark-id",
type=str,
default=str(uuid.uuid4()),
help="Pseudo-unique identifier to use for this benchmark",
)
parser.add_argument(
"-n",
"--num-measurements",
type=int,
default=6,
help="Number of times to repeat each benchmark iteration",
)
parser.add_argument(
"-s",
"--size",
type=str,
default="medium",
choices=["medium", "ci", "large"],
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"--no-benchmark-this-checkout",
action="store_true",
help="Don't benchmark the version of materialized in this checkout",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging output"
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="Open a web browser showing results visualizations",
)
parser.add_argument(
"composition",
type=str,
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"git_references",
type=str,
nargs="*",
help="Materialized builds to test as well, identified by git reference",
)
args = parser.parse_args()
main(args)
|
get_rules
|
Get the rules governing the snapshot creation
Args:
rule_list: List of rules
Returns:
Rules object with attribute `rules`. See Rules object for detailed doc.
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Generate rules for snapshoting"""
from ggrc.snapshotter.datastructures import Attr
class Types(object):
"""Get default types for snapshotting"""
# pylint: disable=too-few-public-methods
all = {
"AccessGroup",
"AccountBalance",
"Contract",
"Control",
"DataAsset",
"Facility",
"Market",
"Objective",
"OrgGroup",
"Policy",
"Process",
"Product",
"Project",
"Regulation",
"Requirement",
"Standard",
"System",
"Vendor",
"Risk",
"TechnologyEnvironment",
"Threat",
"Metric",
"ProductGroup",
"KeyReport",
}
parents = {
"Audit",
}
scoped = {
"Assessment",
}
trans_scope = {
"Issue",
}
ignore = {
"Assessment",
"AssessmentTemplate",
"Issue",
"Workflow",
"Audit",
"Person"
}
external = {
"AccessGroup",
"AccountBalance",
"DataAsset",
"Facility",
"KeyReport",
"Market",
"Metric",
"OrgGroup",
"Process",
"Product",
"ProductGroup",
"Project",
"System",
"Vendor",
"TechnologyEnvironment",
"Control",
"Risk",
}
@classmethod
def internal_types(cls):
"""Return set of internal type names."""
return cls.all - cls.external
@classmethod
def external_types(cls):
"""Return set of external type names."""
return cls.external
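# Illustrative check of the set algebra above (derived from the literals in
# this class, not an exhaustive list):
#   assert "Contract" in Types.internal_types()   # in `all`, not in `external`
#   assert "Control" in Types.external_types()    # listed in `external`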
class Rules(object):
"""Returns a dictionary of rules
Expected format of rule_list is the following:
[
{"master_object_type", ...},
{"first degree object types"},
{"second degree object types"}
]
For all master objects of type master_object_type, it will gather all
related objects from first degree object types (which can be related via
relationships table or via direct mapping (in which case you should wrap
the attribute name in Attr) and gather all of first degrees related objects
of the types listed in the second degree object type.
Example:
[
{"object_type_1", ["object_type_2", ...]},
{"type_of_related_object_or_attribute", ["second..."]},
{"type_of_object_to_snapshot_1", ["type_2", ...]}
]
From it, it will build a dictionary of format:
{
"parent_type": {
"fst": {"type_of_related_object_or_attribute_1", ...},
"snd": {"type_1", "type_2", ...}
},
...
}
"""
# pylint: disable=too-few-public-methods
def __init__(self, rule_list):
self.rules = dict()
for parents, fstdeg, snddeg in rule_list:
for parent in parents:
self.rules[parent] = {
"fst": fstdeg,
"snd": snddeg
}
DEFAULT_RULE_LIST = [
[
{"Audit"},
{Attr("program")},
Types.all - Types.ignore
]
]
# MASKED: get_rules function (lines 150-160)
|
def get_rules(rule_list=None):
"""Get the rules governing the snapshot creation
Args:
rule_list: List of rules
Returns:
Rules object with attribute `rules`. See Rules object for detailed doc.
"""
if not rule_list:
rule_list = DEFAULT_RULE_LIST
return Rules(rule_list)
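# Illustrative result for the default rule list (shape only, derived from
# Rules.__init__ and DEFAULT_RULE_LIST above):
#   get_rules().rules == {
#       "Audit": {
#           "fst": {Attr("program")},
#           "snd": Types.all - Types.ignore,
#       }
#   }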
| 150
| 160
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Generate rules for snapshoting"""
from ggrc.snapshotter.datastructures import Attr
class Types(object):
"""Get default types for snapshotting"""
# pylint: disable=too-few-public-methods
all = {
"AccessGroup",
"AccountBalance",
"Contract",
"Control",
"DataAsset",
"Facility",
"Market",
"Objective",
"OrgGroup",
"Policy",
"Process",
"Product",
"Project",
"Regulation",
"Requirement",
"Standard",
"System",
"Vendor",
"Risk",
"TechnologyEnvironment",
"Threat",
"Metric",
"ProductGroup",
"KeyReport",
}
parents = {
"Audit",
}
scoped = {
"Assessment",
}
trans_scope = {
"Issue",
}
ignore = {
"Assessment",
"AssessmentTemplate",
"Issue",
"Workflow",
"Audit",
"Person"
}
external = {
"AccessGroup",
"AccountBalance",
"DataAsset",
"Facility",
"KeyReport",
"Market",
"Metric",
"OrgGroup",
"Process",
"Product",
"ProductGroup",
"Project",
"System",
"Vendor",
"TechnologyEnvironment",
"Control",
"Risk",
}
@classmethod
def internal_types(cls):
"""Return set of internal type names."""
return cls.all - cls.external
@classmethod
def external_types(cls):
"""Return set of external type names."""
return cls.external
class Rules(object):
"""Returns a dictionary of rules
Expected format of rule_list is the following:
[
{"master_object_type", ...},
{"first degree object types"},
{"second degree object types"}
]
For all master objects of type master_object_type, it will gather all
related objects from first degree object types (which can be related via
relationships table or via direct mapping (in which case you should wrap
the attribute name in Attr) and gather all of first degrees related objects
of the types listed in the second degree object type.
Example:
[
{"object_type_1", ["object_type_2", ...]},
{"type_of_related_object_or_attribute", ["second..."]},
{"type_of_object_to_snapshot_1", ["type_2", ...]}
]
From it, it will build a dictionary of format:
{
"parent_type": {
"fst": {"type_of_related_object_or_attribute_1", ...},
"snd": {"type_1", "type_2", ...}
},
...
}
"""
# pylint: disable=too-few-public-methods
def __init__(self, rule_list):
self.rules = dict()
for parents, fstdeg, snddeg in rule_list:
for parent in parents:
self.rules[parent] = {
"fst": fstdeg,
"snd": snddeg
}
DEFAULT_RULE_LIST = [
[
{"Audit"},
{Attr("program")},
Types.all - Types.ignore
]
]
def get_rules(rule_list=None):
"""Get the rules governing the snapshot creation
Args:
rule_list: List of rules
Returns:
Rules object with attribute `rules`. See Rules object for detailed doc.
"""
if not rule_list:
rule_list = DEFAULT_RULE_LIST
return Rules(rule_list)
|
__init__
|
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
#* @param boolean ssl_on optional flag to enable SSL for 2048 bit encrypted messages.
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
# MASKED: __init__ function (lines 21-63)
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
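# For illustration: _request() joins the list above into a URL of roughly
# this shape (keys and payload are placeholders):
#   http://pubsub.pubnub.com/publish/PUB-KEY/SUB-KEY/<signature>/CHANNEL/0/{"some_text":"Hello"}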
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if 'channel' not in args :
raise Exception('Missing Channel.')
## Fail if missing callback
if 'callback' not in args :
raise Exception('Missing Callback.')
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
'callback' : pres_event
})
"""
## Fail if missing channel
if 'channel' not in args :
raise Exception('Missing Channel.')
## Fail if missing callback
if 'callback' not in args :
raise Exception('Missing Callback.')
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
| 21
| 63
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
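The implementation above targets Python 2 (urllib2, basestring). A minimal sketch of the same constructor logic on Python 3; the class name PubnubPy3 is a hypothetical stand-in, not part of the original library:

import uuid

class PubnubPy3:
    # Hypothetical Python 3 port of the __init__ implementation above.
    def __init__(self, publish_key, subscribe_key, secret_key=None,
                 ssl_on=False, origin='pubsub.pubnub.com', pres_uuid=None):
        # Build the scheme-qualified origin in one step instead of mutating it.
        self.origin = ('https://' if ssl_on else 'http://') + origin
        self.limit = 1800
        self.publish_key = publish_key
        self.subscribe_key = subscribe_key
        self.secret_key = secret_key
        self.ssl = ssl_on
        # basestring is gone in Python 3; str is the only text type to check.
        self.uuid = pres_uuid or str(uuid.uuid4())
        if not isinstance(self.uuid, str):
            raise AttributeError("pres_uuid must be a string")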
publish
|
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
# MASKED: publish function (lines 65-115)
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
| 65
| 115
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
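The publish signature above is an MD5 digest of '/'-joined fields. A standalone sketch of the same scheme on Python 3, where hashlib.md5 requires bytes; sign_publish is a hypothetical helper name, and MD5 is kept only because this legacy API used it, not because it is a secure MAC:

import hashlib
import json

def sign_publish(publish_key, subscribe_key, secret_key, channel, message):
    # Same scheme as above: md5 over keys, channel and the compact JSON body.
    body = json.dumps(message, separators=(',', ':'))
    joined = '/'.join([publish_key, subscribe_key, secret_key, channel, body])
    return hashlib.md5(joined.encode('utf-8')).hexdigest()

# Example: sign_publish('demo-pub', 'demo-sub', 'demo-secret',
#                       'hello_world', {'some_text': 'Hello my World'})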
subscribe
|
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
# MASKED: subscribe function (lines 118-186)
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
| 118
| 186
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
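subscribe above is a blocking long-poll: request with the last timetoken, deliver any messages, and repeat until the callback returns falsy. A hedged Python 3 sketch of that loop; fetch_json is a hypothetical stand-in for the _request helper, and the channel is assumed URL-safe (the original _encode takes care of escaping):

import json
import time
import urllib.request

def fetch_json(url, timeout=200):
    # Hypothetical stand-in for _request: GET the URL, decode the JSON body.
    with urllib.request.urlopen(url, timeout=timeout) as resp:
        return json.loads(resp.read().decode('utf-8'))

def subscribe_loop(origin, subscribe_key, channel, callback, client_uuid):
    timetoken = '0'
    while True:
        try:
            url = '%s/subscribe/%s/%s/0/%s?uuid=%s' % (
                origin, subscribe_key, channel, timetoken, client_uuid)
            messages, timetoken = fetch_json(url)[:2]
            # An empty message list just means the long poll timed out.
            for message in messages:
                if not callback(message):
                    return
        except Exception:
            time.sleep(1)  # brief back-off, then reconnect, as above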
presence
|
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
        'callback' : pres_event
})
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
# MASKED: presence function (lines 188-226)
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
| 188
| 226
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
        #* @param boolean ssl optional; set True to use HTTPS (2048-bit TLS).
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
        ## Initiate Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
            'callback' : pres_event
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
        limit = 'limit' in args and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
        except Exception:
return None
|
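presence above is simply subscribe on a parallel channel suffixed with -pnpres, and here_now is a plain GET against the v2 presence path. A short sketch in those terms, reusing the hypothetical subscribe_loop and fetch_json helpers sketched after the earlier records:

def presence_loop(origin, subscribe_key, channel, callback, client_uuid):
    # Presence events travel on the parallel channel '<channel>-pnpres'.
    return subscribe_loop(origin, subscribe_key, channel + '-pnpres',
                          callback, client_uuid)

def channel_here_now(origin, subscribe_key, channel):
    # Occupancy query matching the v2 path built by here_now above.
    return fetch_json('%s/v2/presence/sub_key/%s/channel/%s'
                      % (origin, subscribe_key, channel))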
__init__
|
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
    By default, TCP connections are created unless connection_class is specified.
Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
|
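Before the module source, a minimal usage sketch of the pool contract this docstring describes, assuming a redis-py of this vintage (StrictRedis and pool-exhaustion raising redis.ConnectionError are that era's behaviour):

import redis

pool = redis.ConnectionPool(host='localhost', port=6379, db=0,
                            max_connections=10)  # cap the pool at 10 sockets
client = redis.StrictRedis(connection_pool=pool)
client.set('greeting', 'hello')
print(client.get('greeting'))
# Once 10 connections are checked out and none are free to reuse,
# further checkouts raise redis.ConnectionError until one is released.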
from __future__ import with_statement
from distutils.version import StrictVersion
from itertools import chain
from select import select
import os
import socket
import sys
import threading
import warnings
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
unquote)
from redis.exceptions import (
RedisError,
ConnectionError,
TimeoutError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
ReadOnlyError
)
from redis.utils import HIREDIS_AVAILABLE
if HIREDIS_AVAILABLE:
import hiredis
hiredis_version = StrictVersion(hiredis.__version__)
HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
hiredis_version >= StrictVersion('0.1.3')
HIREDIS_SUPPORTS_BYTE_BUFFER = \
hiredis_version >= StrictVersion('0.1.4')
if not HIREDIS_SUPPORTS_BYTE_BUFFER:
msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
"hiredis %s. Please consider upgrading." % hiredis.__version__)
warnings.warn(msg)
HIREDIS_USE_BYTE_BUFFER = True
# only use byte buffer if hiredis supports it and the Python version
# is >= 2.7
if not HIREDIS_SUPPORTS_BYTE_BUFFER or (
sys.version_info[0] == 2 and sys.version_info[1] < 7):
HIREDIS_USE_BYTE_BUFFER = False
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
"""
Literal strings in Redis commands, such as the command names and any
hard-coded arguments are wrapped in this class so we know not to apply
    any encoding rules to them.
"""
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': ResponseError,
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
'READONLY': ReadOnlyError,
}
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
return self.EXCEPTION_CLASSES[error_code](response)
return ResponseError(response)
class SocketBuffer(object):
def __init__(self, socket, socket_read_size):
self._sock = socket
self.socket_read_size = socket_read_size
self._buffer = BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def _read_from_socket(self, length=None):
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
try:
while True:
data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shut down the socket
if isinstance(data, bytes) and len(data) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
break
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
def read(self, length):
length = length + 2 # make sure to read the \r\n terminator
# make sure we've read enough data from the socket
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
self.purge()
self._buffer.close()
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
"Plain Python parsing class"
encoding = None
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock, self.socket_read_size)
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
"Called when the socket disconnects"
if self._sock is not None:
self._sock.close()
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoding = None
def can_read(self):
return self._buffer and bool(self._buffer.length)
def read_response(self):
response = self._buffer.readline()
if not response:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = byte_to_chr(response[0]), response[1:]
if byte not in ('-', '+', ':', '$', '*'):
raise InvalidResponse("Protocol Error: %s, %s" %
(str(byte), str(response)))
# server returned an error
if byte == '-':
response = nativestr(response)
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == '+':
pass
# int value
elif byte == ':':
response = long(response)
# bulk response
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
# multi-bulk response
elif byte == '*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for i in xrange(length)]
if isinstance(response, bytes) and self.encoding:
response = response.decode(self.encoding)
return response
class HiredisParser(BaseParser):
"Parser class for connections using Hiredis"
def __init__(self, socket_read_size):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not installed")
self.socket_read_size = socket_read_size
if HIREDIS_USE_BYTE_BUFFER:
self._buffer = bytearray(socket_read_size)
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
self._sock = connection._sock
kwargs = {
'protocolError': InvalidResponse,
'replyError': self.parse_error,
}
# hiredis < 0.1.3 doesn't support functions that create exceptions
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
kwargs['replyError'] = ResponseError
if connection.decode_responses:
kwargs['encoding'] = connection.encoding
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
def on_disconnect(self):
self._sock = None
self._reader = None
self._next_response = False
def can_read(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
return self._next_response is not False
def read_response(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
socket_read_size = self.socket_read_size
while response is False:
try:
if HIREDIS_USE_BYTE_BUFFER:
bufflen = self._sock.recv_into(self._buffer)
if bufflen == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
else:
buffer = self._sock.recv(socket_read_size)
                    # an empty string indicates the server shut down the socket
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
if HIREDIS_USE_BYTE_BUFFER:
self._reader.feed(self._buffer, 0, bufflen)
else:
self._reader.feed(buffer)
# proactively, but not conclusively, check if more data is in the
# buffer. if the data received doesn't end with \r\n, there's more.
if HIREDIS_USE_BYTE_BUFFER:
if bufflen > 2 and \
self._buffer[bufflen - 2:bufflen] != SYM_CRLF:
continue
else:
if not buffer.endswith(SYM_CRLF):
continue
response = self._reader.gets()
# if an older version of hiredis is installed, we need to attempt
# to convert ResponseErrors to their appropriate types.
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
if isinstance(response, ResponseError):
response = self.parse_error(response.args[0])
elif isinstance(response, list) and response and \
isinstance(response[0], ResponseError):
response[0] = self.parse_error(response[0].args[0])
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif isinstance(response, list) and response and \
isinstance(response[0], ConnectionError):
raise response[0]
return response
if HIREDIS_AVAILABLE:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Redis server"
description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, host='localhost', port=6379, db=0, password=None,
socket_timeout=None, socket_connect_timeout=None,
socket_keepalive=False, socket_keepalive_options=None,
retry_on_timeout=False, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'host': self.host,
'port': self.port,
'db': self.db,
}
self._connect_callbacks = []
def __repr__(self):
return self.description_format % self._description_args
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
callback(self)
def _connect(self):
"Create a TCP socket connection"
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if a password is specified, authenticate
if self.password:
self.send_command('AUTH', self.password)
if nativestr(self.read_response()) != 'OK':
raise AuthenticationError('Invalid Password')
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Redis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
try:
if isinstance(command, str):
command = [command]
for item in command:
self._sock.sendall(item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except socket.error:
e = sys.exc_info()[1]
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." %
(_errno, errmsg))
except:
self.disconnect()
raise
def send_command(self, *args):
"Pack and send a command to the Redis server"
self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0):
"Poll the socket to see if there's data that can be read."
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
"Return a bytestring representation of the value"
if isinstance(value, Token):
return b(value.value)
elif isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = b(str(value))
elif isinstance(value, float):
value = b(repr(value))
elif not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def pack_command(self, *args):
"Pack a series of arguments into the Redis protocol"
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
        # manually. All of these arguments get wrapped in the Token class
# to prevent them from being encoded.
command = args[0]
if ' ' in command:
args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
else:
args = (Token(command),) + args[1:]
buff = SYM_EMPTY.join(
(SYM_STAR, b(str(len(args))), SYM_CRLF))
for arg in imap(self.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values
if len(buff) > 6000 or len(arg) > 6000:
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
SYM_CRLF, arg, SYM_CRLF))
output.append(buff)
return output
def pack_commands(self, commands):
"Pack multiple commands into the Redis protocol"
output = []
pieces = []
buffer_length = 0
for cmd in commands:
for chunk in self.pack_command(*cmd):
pieces.append(chunk)
buffer_length += len(chunk)
if buffer_length > 6000:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
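# Illustrative example (not executed; assumes a default Connection instance
# ``conn``): pack_command() renders the RESP wire format, an array header
# followed by one length-prefixed bulk string per argument, each terminated
# by CRLF:
#   >>> b('').join(conn.pack_command('SET', 'foo', 'bar'))
#   '*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'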
class SSLConnection(Connection):
description_format = "SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, **kwargs):
if not ssl_available:
raise RedisError("Python wasn't built with SSL support")
super(SSLConnection, self).__init__(**kwargs)
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
if ssl_cert_reqs is None:
ssl_cert_reqs = ssl.CERT_NONE
elif isinstance(ssl_cert_reqs, basestring):
CERT_REQS = {
'none': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" %
ssl_cert_reqs)
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
def _connect(self):
"Wrap the socket with SSL support"
sock = super(SSLConnection, self)._connect()
sock = ssl.wrap_socket(sock,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
return sock
class UnixDomainSocketConnection(Connection):
description_format = "UnixDomainSocketConnection<path=%(path)s,db=%(db)s>"
def __init__(self, path='', db=0, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
retry_on_timeout=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.path = path
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'path': self.path,
'db': self.db,
}
self._connect_callbacks = []
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
class ConnectionPool(object):
"Generic connection pool"
@classmethod
def from_url(cls, url, db=None, decode_components=False, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
redis:// creates a normal TCP socket connection
        rediss:// creates an SSL-wrapped TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
url_string = url
url = urlparse(url)
qs = ''
        # in python 2.6, custom URL schemes don't recognize querystring
        # values; they're left as part of the url.path.
if '?' in url.path and not url.query:
# chop the querystring including the ? off the end of the url
# and reparse it.
qs = url.path.split('?', 1)[1]
url = urlparse(url_string[:-(len(qs) + 1)])
else:
qs = url.query
url_options = {}
for name, value in iteritems(parse_qs(qs)):
if value and len(value) > 0:
url_options[name] = value[0]
if decode_components:
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
password = url.password
path = url.path
hostname = url.hostname
        # We only support the redis://, rediss:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
else:
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
        # backwards compatibility
if 'charset' in kwargs:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
kwargs['encoding'] = kwargs.pop('charset')
if 'errors' in kwargs:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
kwargs['encoding_errors'] = kwargs.pop('errors')
return cls(**kwargs)
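    # Illustrative usage (hypothetical values): a URL carrying a password and
    # a path-style db number is mapped onto plain keyword arguments:
    #   >>> pool = ConnectionPool.from_url('redis://:secret@localhost:6379/2')
    #   >>> pool.connection_kwargs['db'], pool.connection_kwargs['password']
    #   (2, 'secret')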
# MASKED: __init__ function (lines 841-861)
def __repr__(self):
return "%s<%s>" % (
type(self).__name__,
self.connection_class.description_format % self.connection_kwargs,
)
def reset(self):
self.pid = os.getpid()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
self._check_lock = threading.Lock()
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
# another thread already did the work while we waited
# on the lock.
return
self.disconnect()
self.reset()
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
if connection.pid != self.pid:
return
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"Disconnects all connections in the pool"
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from redis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
    It performs the same function as the default
    :py:class:`~redis.connection.ConnectionPool` implementation, in that it
    maintains a pool of reusable connections that can be shared by multiple
    redis clients (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all of the connections are in use, rather
    than raising a :py:class:`~redis.exceptions.ConnectionError` (as the
    default :py:class:`~redis.connection.ConnectionPool` implementation
    does), it makes the client wait ("blocks") for a specified number of
    seconds until a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20,
connection_class=Connection, queue_class=LifoQueue,
**connection_kwargs):
self.queue_class = queue_class
self.timeout = timeout
super(BlockingConnectionPool, self).__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True, timeout=self.timeout)
except Empty:
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
if connection.pid != self.pid:
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
def disconnect(self):
"Disconnects all connections in the pool."
for connection in self._connections:
connection.disconnect()
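# Illustrative checkout/check-in cycle against a BlockingConnectionPool
# (hypothetical values; assumes a Redis server on localhost:6379):
#   pool = BlockingConnectionPool(max_connections=2, timeout=5)
#   conn = pool.get_connection('PING')
#   try:
#       conn.send_command('PING')
#       assert nativestr(conn.read_response()) == 'PONG'
#   finally:
#       pool.release(conn)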
|
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
"""
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
        By default, TCP connections are created unless connection_class is specified.
Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
"""
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, (int, long)) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
self.reset()
| 841
| 861
|
from __future__ import with_statement
from distutils.version import StrictVersion
from itertools import chain
from select import select
import os
import socket
import sys
import threading
import warnings
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
unquote)
from redis.exceptions import (
RedisError,
ConnectionError,
TimeoutError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
ReadOnlyError
)
from redis.utils import HIREDIS_AVAILABLE
if HIREDIS_AVAILABLE:
import hiredis
hiredis_version = StrictVersion(hiredis.__version__)
HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
hiredis_version >= StrictVersion('0.1.3')
HIREDIS_SUPPORTS_BYTE_BUFFER = \
hiredis_version >= StrictVersion('0.1.4')
if not HIREDIS_SUPPORTS_BYTE_BUFFER:
msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
"hiredis %s. Please consider upgrading." % hiredis.__version__)
warnings.warn(msg)
HIREDIS_USE_BYTE_BUFFER = True
# only use byte buffer if hiredis supports it and the Python version
# is >= 2.7
if not HIREDIS_SUPPORTS_BYTE_BUFFER or (
sys.version_info[0] == 2 and sys.version_info[1] < 7):
HIREDIS_USE_BYTE_BUFFER = False
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
"""
    Literal strings in Redis commands, such as the command names and any
    hard-coded arguments, are wrapped in this class so we know not to apply
    any encoding rules to them.
"""
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
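# Illustrative note: wrapping a literal in Token short-circuits encoding;
# Connection.encode() returns b(value.value) for Token instances before any
# of the unicode/str branches run, so command names such as 'GET' are sent
# byte-for-byte as written.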
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': ResponseError,
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
'READONLY': ReadOnlyError,
}
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
return self.EXCEPTION_CLASSES[error_code](response)
return ResponseError(response)
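    # Illustrative example: an error reply '-NOSCRIPT No matching script'
    # reaches parse_error() with the leading '-' already stripped; the first
    # word picks the exception class and the remainder becomes its message,
    # so the call returns (roughly):
    #   >>> BaseParser().parse_error('NOSCRIPT No matching script')
    #   NoScriptError('No matching script',)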
class SocketBuffer(object):
def __init__(self, socket, socket_read_size):
self._sock = socket
self.socket_read_size = socket_read_size
self._buffer = BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def _read_from_socket(self, length=None):
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
try:
while True:
data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shut down the socket
if isinstance(data, bytes) and len(data) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
break
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
def read(self, length):
length = length + 2 # make sure to read the \r\n terminator
# make sure we've read enough data from the socket
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
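        # (read() requests length + 2 bytes so the CRLF terminating every
        # RESP bulk string is consumed from the buffer as well, then strips
        # it with data[:-2]; a $3 reply therefore pulls 'foo\r\n' out of the
        # buffer and returns 'foo'.)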
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
self.purge()
self._buffer.close()
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
"Plain Python parsing class"
encoding = None
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock, self.socket_read_size)
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
"Called when the socket disconnects"
if self._sock is not None:
self._sock.close()
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoding = None
def can_read(self):
return self._buffer and bool(self._buffer.length)
def read_response(self):
response = self._buffer.readline()
if not response:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = byte_to_chr(response[0]), response[1:]
if byte not in ('-', '+', ':', '$', '*'):
raise InvalidResponse("Protocol Error: %s, %s" %
(str(byte), str(response)))
# server returned an error
if byte == '-':
response = nativestr(response)
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == '+':
pass
# int value
elif byte == ':':
response = long(response)
# bulk response
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
# multi-bulk response
elif byte == '*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for i in xrange(length)]
if isinstance(response, bytes) and self.encoding:
response = response.decode(self.encoding)
return response
class HiredisParser(BaseParser):
"Parser class for connections using Hiredis"
def __init__(self, socket_read_size):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not installed")
self.socket_read_size = socket_read_size
if HIREDIS_USE_BYTE_BUFFER:
self._buffer = bytearray(socket_read_size)
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
self._sock = connection._sock
kwargs = {
'protocolError': InvalidResponse,
'replyError': self.parse_error,
}
# hiredis < 0.1.3 doesn't support functions that create exceptions
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
kwargs['replyError'] = ResponseError
if connection.decode_responses:
kwargs['encoding'] = connection.encoding
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
def on_disconnect(self):
self._sock = None
self._reader = None
self._next_response = False
def can_read(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
return self._next_response is not False
def read_response(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
socket_read_size = self.socket_read_size
while response is False:
try:
if HIREDIS_USE_BYTE_BUFFER:
bufflen = self._sock.recv_into(self._buffer)
if bufflen == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
else:
buffer = self._sock.recv(socket_read_size)
                    # an empty string indicates the server shut down the socket
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
if HIREDIS_USE_BYTE_BUFFER:
self._reader.feed(self._buffer, 0, bufflen)
else:
self._reader.feed(buffer)
# proactively, but not conclusively, check if more data is in the
# buffer. if the data received doesn't end with \r\n, there's more.
if HIREDIS_USE_BYTE_BUFFER:
if bufflen > 2 and \
self._buffer[bufflen - 2:bufflen] != SYM_CRLF:
continue
else:
if not buffer.endswith(SYM_CRLF):
continue
response = self._reader.gets()
# if an older version of hiredis is installed, we need to attempt
# to convert ResponseErrors to their appropriate types.
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
if isinstance(response, ResponseError):
response = self.parse_error(response.args[0])
elif isinstance(response, list) and response and \
isinstance(response[0], ResponseError):
response[0] = self.parse_error(response[0].args[0])
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif isinstance(response, list) and response and \
isinstance(response[0], ConnectionError):
raise response[0]
return response
if HIREDIS_AVAILABLE:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Redis server"
description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, host='localhost', port=6379, db=0, password=None,
socket_timeout=None, socket_connect_timeout=None,
socket_keepalive=False, socket_keepalive_options=None,
retry_on_timeout=False, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'host': self.host,
'port': self.port,
'db': self.db,
}
self._connect_callbacks = []
def __repr__(self):
return self.description_format % self._description_args
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
callback(self)
def _connect(self):
"Create a TCP socket connection"
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if a password is specified, authenticate
if self.password:
self.send_command('AUTH', self.password)
if nativestr(self.read_response()) != 'OK':
raise AuthenticationError('Invalid Password')
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Redis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
try:
if isinstance(command, str):
command = [command]
for item in command:
self._sock.sendall(item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except socket.error:
e = sys.exc_info()[1]
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." %
(_errno, errmsg))
except:
self.disconnect()
raise
def send_command(self, *args):
"Pack and send a command to the Redis server"
self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0):
"Poll the socket to see if there's data that can be read."
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
"Return a bytestring representation of the value"
if isinstance(value, Token):
return b(value.value)
elif isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = b(str(value))
elif isinstance(value, float):
value = b(repr(value))
elif not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def pack_command(self, *args):
"Pack a series of arguments into the Redis protocol"
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
        # manually. All of these arguments get wrapped in the Token class
# to prevent them from being encoded.
command = args[0]
if ' ' in command:
args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
else:
args = (Token(command),) + args[1:]
buff = SYM_EMPTY.join(
(SYM_STAR, b(str(len(args))), SYM_CRLF))
for arg in imap(self.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values
if len(buff) > 6000 or len(arg) > 6000:
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
SYM_CRLF, arg, SYM_CRLF))
output.append(buff)
return output
def pack_commands(self, commands):
"Pack multiple commands into the Redis protocol"
output = []
pieces = []
buffer_length = 0
for cmd in commands:
for chunk in self.pack_command(*cmd):
pieces.append(chunk)
buffer_length += len(chunk)
if buffer_length > 6000:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class SSLConnection(Connection):
description_format = "SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, **kwargs):
if not ssl_available:
raise RedisError("Python wasn't built with SSL support")
super(SSLConnection, self).__init__(**kwargs)
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
if ssl_cert_reqs is None:
ssl_cert_reqs = ssl.CERT_NONE
elif isinstance(ssl_cert_reqs, basestring):
CERT_REQS = {
'none': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" %
ssl_cert_reqs)
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
def _connect(self):
"Wrap the socket with SSL support"
sock = super(SSLConnection, self)._connect()
sock = ssl.wrap_socket(sock,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
return sock
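    # Note: ssl.wrap_socket() is the legacy module-level helper, deprecated
    # in modern Python in favour of ssl.SSLContext. A rough present-day
    # sketch, assuming the same instance attributes, would be:
    #   ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    #   ctx.verify_mode = self.cert_reqs
    #   if self.ca_certs:
    #       ctx.load_verify_locations(self.ca_certs)
    #   if self.certfile:
    #       ctx.load_cert_chain(self.certfile, self.keyfile)
    #   sock = ctx.wrap_socket(sock)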
class UnixDomainSocketConnection(Connection):
description_format = "UnixDomainSocketConnection<path=%(path)s,db=%(db)s>"
def __init__(self, path='', db=0, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
retry_on_timeout=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.path = path
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'path': self.path,
'db': self.db,
}
self._connect_callbacks = []
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
class ConnectionPool(object):
"Generic connection pool"
@classmethod
def from_url(cls, url, db=None, decode_components=False, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
redis:// creates a normal TCP socket connection
        rediss:// creates an SSL-wrapped TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
url_string = url
url = urlparse(url)
qs = ''
        # in python 2.6, custom URL schemes don't recognize querystring
        # values; they're left as part of the url.path.
if '?' in url.path and not url.query:
# chop the querystring including the ? off the end of the url
# and reparse it.
qs = url.path.split('?', 1)[1]
url = urlparse(url_string[:-(len(qs) + 1)])
else:
qs = url.query
url_options = {}
for name, value in iteritems(parse_qs(qs)):
if value and len(value) > 0:
url_options[name] = value[0]
if decode_components:
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
password = url.password
path = url.path
hostname = url.hostname
        # We only support the redis://, rediss:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
else:
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
        # backwards compatibility
if 'charset' in kwargs:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
kwargs['encoding'] = kwargs.pop('charset')
if 'errors' in kwargs:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
kwargs['encoding_errors'] = kwargs.pop('errors')
return cls(**kwargs)
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
"""
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
        By default, TCP connections are created unless connection_class is specified.
Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
"""
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, (int, long)) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
self.reset()
def __repr__(self):
return "%s<%s>" % (
type(self).__name__,
self.connection_class.description_format % self.connection_kwargs,
)
def reset(self):
self.pid = os.getpid()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
self._check_lock = threading.Lock()
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
# another thread already did the work while we waited
# on the lock.
return
self.disconnect()
self.reset()
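    # (Fork-safety note: _checkpid() compares the pid recorded at pool
    # creation with os.getpid(); after a fork() the child sees a mismatch,
    # disconnects every inherited socket and reset()s its bookkeeping, so
    # parent and child never share a live connection.)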
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
if connection.pid != self.pid:
return
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"Disconnects all connections in the pool"
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from redis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
    It performs the same function as the default
    :py:class:`~redis.connection.ConnectionPool` implementation, in that it
    maintains a pool of reusable connections that can be shared by multiple
    redis clients (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all of the connections are in use, rather
    than raising a :py:class:`~redis.exceptions.ConnectionError` (as the
    default :py:class:`~redis.connection.ConnectionPool` implementation
    does), it makes the client wait ("blocks") for a specified number of
    seconds until a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20,
connection_class=Connection, queue_class=LifoQueue,
**connection_kwargs):
self.queue_class = queue_class
self.timeout = timeout
super(BlockingConnectionPool, self).__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True, timeout=self.timeout)
except Empty:
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
if connection.pid != self.pid:
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
def disconnect(self):
"Disconnects all connections in the pool."
for connection in self._connections:
connection.disconnect()
|
ngram_processor
|
Given a sequence or iterable of arbitrary items, return an iterator of
item ngram tuples of length ngram_len. Buffers at most ngram_len items
from the iterable.
For example::
>>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
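A minimal sketch consistent with this contract (hypothetical; shown only
for illustration, not the project's actual implementation):

    from collections import deque

    def ngram_processor(items, ngram_len):
        buf = deque(maxlen=ngram_len)  # buffers at most ngram_len items
        for item in items:
            buf.append(item)
            if len(buf) == ngram_len:
                yield tuple(buf)  # emits one ngram per item once full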
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import json
import os
import codecs
import cPickle
from unittest.case import skipIf
from commoncode.testcase import FileBasedTesting
from textcode.analysis import DEFAULT_GAP
from textcode.analysis import NO_GAP
from textcode.analysis import InvalidGapError
from textcode.analysis import UnbalancedTemplateError
from textcode.analysis import Token
from textcode.analysis import word_splitter
from textcode.analysis import unigram_splitter
from textcode.analysis import unigram_tokenizer
from textcode.analysis import position_processor
from textcode.analysis import template_splitter
from textcode.analysis import template_processor
from textcode.analysis import ngram_to_token
from textcode.analysis import ngram_tokenizer
from textcode.analysis import tokens_ngram_processor
from textcode.analysis import doc_subset
from textcode.analysis import unicode_text_lines
from textcode.analysis import text_lines
#############################################################################
#
# Code style note: lines are not wrapped to PEP8 line length on purpose
# to keep the tests more readable
#
#############################################################################
class TestDocsubset(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_doc_subset_single_line(self):
doc = '''A simple test
with multiple
lines
of text
'''.splitlines()
pos = Token(start=0, end=0, start_line=1, start_char=8, end_line=1, end_char=21)
expected = '''with multiple'''
tst = doc_subset(iter(doc), pos)
result = '\n'.join(tst)
assert expected == result
def test_doc_subset_multilines(self):
doc = '''0123456789\n0123456789\n'''.splitlines()
pos = Token(start=0, end=0, start_line=0, start_char=0, end_line=0, end_char=10)
expected = '0123456789'
tst = doc_subset(iter(doc), pos)
result = ''.join(tst)
assert expected == result
def test_doc_subset(self):
doc = iter('''A simple test
with multiple
lines
of text
'''.splitlines())
pos = Token(start=3, end=54, start_line=1, start_char=8, end_line=2, end_char=11)
expected = u'''with multiple
lin'''
tst = doc_subset(iter(doc), pos)
result = u'\n'.join(tst)
assert expected == result
class TestAnalysis(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_text_lines_from_list_or_location_yield_same_results(self):
test_file = self.get_test_loc('analysis/bsd-new')
with open(test_file, 'rb') as inf:
test_strings_list = inf.read().splitlines(True)
# test when we are passing a location or a list
from_loc = list(text_lines(location=test_file))
from_list = list(text_lines(location=test_strings_list))
assert from_loc == from_list
class TestUnigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_unigrams_word_splitter_handles_empty_string(self):
text = iter([''])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_word_splitter_handles_blank_lines(self):
text = iter([u' ', u'', u'\t '])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_word_splitter_can_split(self):
text = iter(u'abc def \n GHI'.splitlines())
result = list(unigram_splitter(text, splitter=word_splitter))
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
Token(start_line=1, end_line=1, start_char=1, end_char=4, value=u'ghi'),
]
assert expected == result
def test_unigrams_word_splitter_handles_empty_iterable(self):
text = iter([])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_empty_string(self):
text = iter([''])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_blank_lines(self):
text = iter([' ', '', '\t '])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_empty_iterable(self):
text = iter([])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_can_split(self):
text = iter(u'abc def \n GHI'.splitlines())
result = list(unigram_splitter(text, splitter=template_splitter))
assert [u'abc', u'def', u'ghi'] == [x.value for x in result]
def test_unigrams_template_splitter_can_split_templates(self):
text = u'abc def \n {{temp}} GHI'.splitlines()
result = list(unigram_splitter(text, splitter=template_splitter))
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
Token(start_line=1, end_line=1, start_char=1, end_char=3, value=u'{{'),
Token(start_line=1, end_line=1, start_char=3, end_char=7, value=u'temp'),
Token(start_line=1, end_line=1, start_char=7, end_char=9, value=u'}}'),
Token(start_line=1, end_line=1, start_char=10, end_char=13, value=u'ghi'),
]
assert expected == result
def test_position_processor(self):
tokens = [
Token(value=u'abc'),
Token(value=u'def'),
Token(value=u'temp'),
Token(value=u'ghi'),
]
expected = [
Token(value=u'abc', start=0, end=0),
Token(value=u'def', start=1, end=1),
Token(value=u'temp', start=2, end=2),
Token(value=u'ghi', start=3, end=3),
]
result = list(position_processor(tokens))
assert expected == result
def test_unigram_tokenizer(self):
inp = u'''Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.'''
tst = list(unigram_tokenizer(inp.splitlines(True)))
assert 39 == len(tst)
expected = u'''redistribution and use in source and binary forms with or
without modification are permitted provided that the following
conditions are met redistributions of source code must retain the above
copyright notice this list of conditions and the following
disclaimer'''.split()
result = [t.value for t in tst]
assert expected == result
class TestTemplates(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def template_parsing(self, lines):
if isinstance(lines, basestring):
lines = lines.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
return list(template_processor(unigrams))
def test_process_template_handles_empty_templates_using_default_gap(self):
lines = [u'ab{{}}cd']
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=6, end_char=8, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_gap(self):
lines = u'ab{{10 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=21, end_char=23, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_raise_invalid_gap_exception(self):
lines = u'ab{{151 nexb Company}}cd'
self.assertRaises(InvalidGapError, self.template_parsing, lines)
def test_process_template_recognizes_template_with_maxgap(self):
lines = u'ab{{150 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=150),
Token(start_line=0, end_line=0, start_char=22, end_char=24, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap(self):
lines = u'ab{{10}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=8, end_char=10, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap_and_spaces(self):
lines = u'ab{{ 10 }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=16, end_char=18, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified(self):
lines = u'ab{{nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified_ignoring_spaces(self):
lines = u'ab{{ \sdsdnexb Companysd }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=28, end_char=30, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap(self):
lines = u'ab{{nexb Company}}cd {{second}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=31, end_char=33, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap_and_custom_gaps(self):
lines = u'ab{{nexb Company}}cd{{12 second}}ef{{12 second}}gh'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=12),
Token(start_line=0, end_line=0, start_char=33, end_char=35, value=u'ef', gap=12),
Token(start_line=0, end_line=0, start_char=48, end_char=50, value=u'gh', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates(self):
lines = u'ab{{c}}d}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=7, end_char=8, value=u'd', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=10, end_char=12, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_empty_lines(self):
lines = u'\n\n'
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_handles_None(self):
lines = None
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_can_parse_simple_line(self):
lines = u'Licensed by {{12 nexB}} to you '
expected = u'licensed by to you'
result = u' '.join(x.value for x in self.template_parsing(lines))
assert expected == result
def test_process_template_does_not_throw_exception_for_illegal_pystache_templates(self):
lines = u'''Permission to use, copy, modify, and {{ /or : the
lines exist without or }} distribute this software...'''
self.template_parsing(lines)
def test_process_template_handles_unicode_text_correctly(self):
expected = [
Token(start_line=0, end_line=0, start_char=1, end_char=4, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=5, end_char=10, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=11, end_char=19, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=21, end_char=24, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=25, end_char=31, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=32, end_char=34, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=35, end_char=40, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=41, end_char=44, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=56, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=57, end_char=60, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=61, end_char=64, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.atxt')
with codecs.open(test_file, encoding='utf-8') as test:
lines = test.read().splitlines()
result = list(self.template_parsing(lines))
assert expected == result
def test_process_template_can_handle_long_text(self):
expected = [
Token(start_line=0, end_line=0, start_char=14, end_char=17, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=23, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=24, end_char=32, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=34, end_char=37, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=38, end_char=44, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=47, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=48, end_char=53, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=54, end_char=57, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=58, end_char=69, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=70, end_char=73, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=74, end_char=77, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.txt')
with codecs.open(test_file, encoding='utf-8') as test:
result = list(self.template_parsing(test))
assert expected == result
def test_process_template_does_not_crash_on_unicode_rules_text_1(self):
test_file = self.get_test_loc('analysis/unicode/12290.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_2(self):
test_file = self.get_test_loc('analysis/unicode/12319.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_3(self):
test_file = self.get_test_loc('analysis/unicode/12405.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_4(self):
test_file = self.get_test_loc('analysis/unicode/12407.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_5(self):
test_file = self.get_test_loc('analysis/unicode/12420.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_detects_non_well_formed_templatized_regions(self):
lines = u'abcd{{ef'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates_2(self):
lines = u'}}{{{{abc}}ddd}}{{'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_can_parse_ill_formed_template(self):
tf = self.get_test_loc('analysis/ill_formed_template/text.txt')
lines = unicode_text_lines(tf)
result = list(self.template_parsing(lines))
expected_gaps = [30, 10, 60, 70, 20]
result_gaps = [x.gap for x in result if x.gap]
assert expected_gaps == result_gaps
et = self.get_test_loc('analysis/ill_formed_template/expected_grams.json')
result_dicts = [t._asdict() for t in result]
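        # Flip regen to True once to regenerate the expected JSON file, then
        # set it back to False so the assertion below actually checks it.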
regen = False
if regen:
with codecs.open(et, 'w', encoding='utf-8') as out:
json.dump(result_dicts, out, indent=2)
with codecs.open(et, encoding='utf-8') as inp:
expected = json.load(inp)
assert expected == result_dicts
def test_token_positions_are_kept_same_for_unigrams_and_ngrams_with_template(self):
lines = u'some text is some text {{ }} in all cases\n \n'
unigrams = unigram_tokenizer(iter([lines]), template=False)
tunigrams = unigram_tokenizer(iter([lines]), template=True)
ngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=False)
tngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=True)
expected_start_end = (0, 7,)
        def check_start_end(toks):
            toks = list(toks)
            result = (toks[0].start, toks[-1].end,)
assert expected_start_end == result
check_start_end(unigrams)
check_start_end(tunigrams)
check_start_end(ngrams)
check_start_end(tngrams)
def test_plain_unigrams_from_templated_unigrams(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
result = list(template_processor(unigrams))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=0, value=u'old'),
Token(start=0, start_line=0, start_char=7, end_line=0, end_char=13, end=0, gap=3, value=u'tailor'),
Token(start=0, start_line=0, start_char=29, end_line=0, end_char=31, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=32, end_line=0, end_char=37, end=0, gap=0, value=u'quite'),
Token(start=0, start_line=0, start_char=38, end_line=0, end_char=42, end=0, gap=0, value=u'very'),
Token(start=0, start_line=0, start_char=43, end_line=0, end_char=47, end=0, gap=0, value=u'rich'),
]
assert expected == result
class TestLegacyNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_plain_ngrams_processor(self):
from collections import deque
        def ngram_processor(items, ngram_len):
            """
            Given a sequence or iterable of arbitrary items, return an iterator of
            item ngrams tuples of length ngram_len. Buffers at most ngram_len
            iterable items.
            For example::
            >>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
            [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
            """
            ngram = deque()
            current_len = 0
            for item in items:
                if current_len == ngram_len:
                    yield tuple(ngram)
                    ngram.popleft()
                    current_len -= 1
                ngram.append(item)
                current_len += 1
            yield tuple(ngram)
text = (
u'''/*COMMENT
COMMENT COMMENT
- COMMENT
*/
public static boolean activateSearchResultView() {
String defaultPerspectiveId= SearchUI.getDefaultPerspectiveId();
if (defaultPerspectiveId != null) {
IWorkbenchWindow window= SearchPlugin.getActiveWorkbenchWindow();
if (window != null && window.getShell() != null && !window.getShell().isDisposed()) {
try {
PlatformUI.getWorkbench().showPerspective(defaultPerspectiveId, window);
} catch (WorkbenchException ex) {
// show view in current perspective
}
}
}''')
expected = [
(u'comment', u'comment', u'comment', u'comment', u'public', u'static'),
(u'comment', u'comment', u'comment', u'public', u'static', u'boolean'),
(u'comment', u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview'),
(u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview', u'string'),
(u'public', u'static', u'boolean', u'activatesearchresultview',
u'string', u'defaultperspectiveid'),
(u'static', u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui'),
(u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid'),
(u'activatesearchresultview', u'string', u'defaultperspectiveid',
u'searchui', u'getdefaultperspectiveid', u'if'),
(u'string', u'defaultperspectiveid', u'searchui',
u'getdefaultperspectiveid', u'if', u'defaultperspectiveid'),
(u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid',
u'if', u'defaultperspectiveid', u'null'),
(u'searchui', u'getdefaultperspectiveid', u'if',
u'defaultperspectiveid', u'null', u'iworkbenchwindow'),
(u'getdefaultperspectiveid', u'if', u'defaultperspectiveid', u'null',
u'iworkbenchwindow', u'window'),
(u'if', u'defaultperspectiveid', u'null', u'iworkbenchwindow',
u'window', u'searchplugin'),
(u'defaultperspectiveid', u'null', u'iworkbenchwindow', u'window',
u'searchplugin', u'getactiveworkbenchwindow'),
(u'null', u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if'),
(u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if', u'window'),
(u'window', u'searchplugin', u'getactiveworkbenchwindow', u'if',
u'window', u'null'),
(u'searchplugin', u'getactiveworkbenchwindow', u'if', u'window',
u'null', u'window'),
(u'getactiveworkbenchwindow', u'if', u'window', u'null', u'window',
u'getshell'),
(u'if', u'window', u'null', u'window', u'getshell', u'null'),
(u'window', u'null', u'window', u'getshell', u'null', u'window'),
(u'null', u'window', u'getshell', u'null', u'window', u'getshell'),
(u'window', u'getshell', u'null', u'window', u'getshell', u'isdisposed'),
(u'getshell', u'null', u'window', u'getshell', u'isdisposed', u'try'),
(u'null', u'window', u'getshell', u'isdisposed', u'try', u'platformui'),
(u'window', u'getshell', u'isdisposed', u'try', u'platformui',
u'getworkbench'),
(u'getshell', u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective'),
(u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective', u'defaultperspectiveid'),
(u'try', u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window'),
(u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window', u'catch'),
(u'getworkbench', u'showperspective', u'defaultperspectiveid',
u'window', u'catch', u'workbenchexception'),
(u'showperspective', u'defaultperspectiveid', u'window', u'catch',
u'workbenchexception', u'ex'),
(u'defaultperspectiveid', u'window', u'catch', u'workbenchexception',
u'ex', u'show'),
(u'window', u'catch', u'workbenchexception', u'ex', u'show', u'view'),
(u'catch', u'workbenchexception', u'ex', u'show', u'view', u'in'),
(u'workbenchexception', u'ex', u'show', u'view', u'in', u'current'),
(u'ex', u'show', u'view', u'in', u'current', u'perspective'),
]
unigrams = (x.value for x
in unigram_splitter(text.splitlines()))
result = list(ngram_processor(unigrams, ngram_len=6))
assert expected == result
class TestNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_tokens_ngram_processor_bigrams_from_unigrams(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=2))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_n2_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=2))
assert expected == result
def test_tokens_ngram_processor_n3_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=3))
assert expected == result
def test_tokens_ngram_processor_n4_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=4))
assert expected == result
def test_tokens_ngram_processor_n10_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=10))
assert expected == result
def test_tokens_ngram_processor_n1_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=1))
assert expected == result
def test_tokens_ngram_processor_3grams_from_unigrams_on_multilines(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=3))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_basic(self):
lines = [u'My old {{3 John Doe}} is rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=3, value=u'old'),
),
(Token(start=0, start_line=0, start_char=22, end_line=0, end_char=24, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=25, end_line=0, end_char=29, end=0, gap=0, value=u'rich'),
)
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_merged(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start_line=0, start_char=0, end_line=0, end_char=13, gap=ngram_len, value=(u'my', u'old', u'tailor')),
Token(start_line=0, start_char=29, end_line=0, end_char=42, gap=0, value=(u'is', u'quite', u'very')),
Token(start_line=0, start_char=32, end_line=0, end_char=47, gap=0, value=(u'quite', u'very', u'rich')),
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=57, end=0, gap=0, value=(u'very', u'rich'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_and_long_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich really rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=64, end=0, gap=0, value=(u'very', u'rich', u'really')),
Token(start=0, start_line=0, start_char=53, end_line=0, end_char=69, end=0, gap=0, value=(u'rich', u'really', u'rich'))
]
assert expected == result
def test_ngram_to_token_processor_with_gaps_at_the_end(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite'))
]
assert expected == result
    def test_tokens_ngram_processor_with_gaps_at_the_end_does_not_yield_empty_tuples(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
result = list(tokens_ngram_processor(templated, ngram_len=ngram_len))
assert (None, None, None,) != result[-1]
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=u'my'),),
(Token(start=0, start_line=0, start_char=20, end_line=0, end_char=22, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=23, end_line=0, end_char=28, end=0, gap=5, value=u'quite'),
)
]
assert expected == result
def test_ngrams_tokenizer_does_not_yield_4grams_for_3grams(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the names {{}}of its contributors may
materials provided with the distribution.'''.splitlines()
result = list(ngram_tokenizer(iter(lines), ngram_len=3, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=16, end=2, gap=0, value=(u'neither', u'the', u'name')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=19, end=3, gap=10, value=(u'the', u'name', u'of')),
Token(start=4, start_line=0, start_char=44, end_line=0, end_char=47, end=4, gap=5, value=(u'nor',)),
Token(start=5, start_line=0, start_char=52, end_line=0, end_char=61, end=6, gap=5, value=(u'the', u'names')),
Token(start=7, start_line=0, start_char=66, end_line=0, end_char=85, end=9, gap=0, value=(u'of', u'its', u'contributors')),
Token(start=8, start_line=0, start_char=69, end_line=0, end_char=89, end=10, gap=0, value=(u'its', u'contributors', u'may')),
Token(start=9, start_line=0, start_char=73, end_line=1, end_char=25, end=11, gap=0, value=(u'contributors', u'may', u'materials')),
Token(start=10, start_line=0, start_char=86, end_line=1, end_char=34, end=12, gap=0, value=(u'may', u'materials', u'provided')),
Token(start=11, start_line=1, start_char=16, end_line=1, end_char=39, end=13, gap=0, value=(u'materials', u'provided', u'with')),
Token(start=12, start_line=1, start_char=26, end_line=1, end_char=43, end=14, gap=0, value=(u'provided', u'with', u'the')),
Token(start=13, start_line=1, start_char=35, end_line=1, end_char=56, end=15, gap=0, value=(u'with', u'the', u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_3grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name')),
(Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_4grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_can_handle_contiguous_template_regions(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}
{{6 }}of its contributors may materials provided with the
distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=1, start_char=25, end_line=1, end_char=27, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=31, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_ngram_tokenizer_can_handle_gaps_at_end_of_text(self):
lines = [u'Neither the name of {{10 the ORGANIZATION}} ']
ngram_len = 2
result = list(ngram_tokenizer(lines, ngram_len, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'neither', u'the')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=16, end=2, gap=0, value=(u'the', u'name')),
Token(start=2, start_line=0, start_char=12, end_line=0, end_char=19, end=3, gap=10, value=(u'name', u'of'))
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n3(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n1(self):
lines = [u'X11 License']
ngram_len = 1
result = list(ngram_tokenizer(lines, ngram_len))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=3, end=0, gap=0, value=(u'x11',)),
Token(start=1, start_line=0, start_char=4, end_line=0, end_char=11, end=1, gap=0, value=(u'license',)),
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_template(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len, template=True))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_unicode_text_lines_handles_weird_xml_encodings(self):
test_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom')
result = list(unicode_text_lines(test_file))
expected_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom.expected')
with open(expected_file, 'rb') as tf:
expected = cPickle.load(tf)
assert expected == result
class TestMultigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
# TODO: add more tests beyond the simple doctests that exist in the code
@skipIf(True, 'Performance tests only')
class TestAnalysisPerformance(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_splitter_perf(self):
test_file = self.get_test_loc('perf/test.txt')
text = open(test_file).read() * 100
utext = unicode(text)
setup1 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws = re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts = re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
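        # Note (added for clarity): the setup source is rendered with %r so
        # that text and utext are embedded as string literals inside the
        # timeit setup namespace, rather than referenced from this scope.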
def check_perf(setup):
from timeit import timeit
stmt = 'list(w for w in %s(%s))'
print()
print('Unicode template')
print(timeit(stmt % ('unicode_ts', 'utext'), setup=setup, number=1000))
print('Plain template')
print(timeit(stmt % ('plain_ts', 'text'), setup=setup, number=1000))
print('Unicode words')
print(timeit(stmt % ('unicode_ws', 'utext'), setup=setup, number=1000))
print('Plain words')
print(timeit(stmt % ('plain_ws', 'text'), setup=setup, number=1000))
print('Plain split')
print(timeit('text.split()', setup=setup, number=1000))
print('Unicode split')
print(timeit('utext.split()', setup=setup, number=1000))
print('Line split')
print(timeit('text.splitlines(False)', setup=setup, number=1000))
print('Line split with ends')
print(timeit('text.splitlines(True)', setup=setup, number=1000))
check_perf(setup=setup1)
setup2 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws = re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts = re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
check_perf(setup=setup2)
|
def ngram_processor(items, ngram_len):
"""
Given a sequence or iterable of arbitrary items, return an iterator of
item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable
items.
For example::
>>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
"""
ngram = deque()
current_len = 0
for item in items:
if current_len == ngram_len:
yield tuple(ngram)
ngram.popleft()
current_len -= 1
ngram.append(item)
current_len += 1
yield tuple(ngram)
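# Illustrative note (not part of the original module): because the deque
# buffers at most ngram_len items, the function streams over arbitrarily
# large iterables, and the trailing yield emits whatever remains buffered,
# so inputs shorter than ngram_len produce a single short tuple:
#     >>> list(ngram_processor([1, 2], ngram_len=3))
#     [(1, 2)]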
| 692
| 712
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import json
import os
import codecs
import cPickle
from unittest.case import skipIf
from commoncode.testcase import FileBasedTesting
from textcode.analysis import DEFAULT_GAP
from textcode.analysis import NO_GAP
from textcode.analysis import InvalidGapError
from textcode.analysis import UnbalancedTemplateError
from textcode.analysis import Token
from textcode.analysis import word_splitter
from textcode.analysis import unigram_splitter
from textcode.analysis import unigram_tokenizer
from textcode.analysis import position_processor
from textcode.analysis import template_splitter
from textcode.analysis import template_processor
from textcode.analysis import ngram_to_token
from textcode.analysis import ngram_tokenizer
from textcode.analysis import tokens_ngram_processor
from textcode.analysis import doc_subset
from textcode.analysis import unicode_text_lines
from textcode.analysis import text_lines
#############################################################################
#
# Code style note: lines are not wrapped to PEP8 line length on purpose
# to keep the tests more readable
#
#############################################################################
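# For orientation (an assumption inferred from the Token reprs used in the
# expectations below, not the actual definition in textcode.analysis): Token
# behaves like a namedtuple with default values for omitted fields, roughly
#
#     Token = namedtuple('Token', 'start start_line start_char end_line '
#                                 'end_char end gap value')
#
# so partially-constructed Tokens such as Token(value=u'abc') still compare
# field-by-field against fully-specified expected Tokens.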
class TestDocsubset(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_doc_subset_single_line(self):
doc = '''A simple test
with multiple
lines
of text
'''.splitlines()
pos = Token(start=0, end=0, start_line=1, start_char=8, end_line=1, end_char=21)
expected = '''with multiple'''
tst = doc_subset(iter(doc), pos)
result = '\n'.join(tst)
assert expected == result
def test_doc_subset_multilines(self):
doc = '''0123456789\n0123456789\n'''.splitlines()
pos = Token(start=0, end=0, start_line=0, start_char=0, end_line=0, end_char=10)
expected = '0123456789'
tst = doc_subset(iter(doc), pos)
result = ''.join(tst)
assert expected == result
def test_doc_subset(self):
doc = iter('''A simple test
with multiple
lines
of text
'''.splitlines())
pos = Token(start=3, end=54, start_line=1, start_char=8, end_line=2, end_char=11)
expected = u'''with multiple
lin'''
tst = doc_subset(iter(doc), pos)
result = u'\n'.join(tst)
assert expected == result
class TestAnalysis(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_text_lines_from_list_or_location_yield_same_results(self):
test_file = self.get_test_loc('analysis/bsd-new')
with open(test_file, 'rb') as inf:
test_strings_list = inf.read().splitlines(True)
        # check that passing a file location or a list of lines yields the same results
from_loc = list(text_lines(location=test_file))
from_list = list(text_lines(location=test_strings_list))
assert from_loc == from_list
class TestUnigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_unigrams_word_splitter_handles_empty_string(self):
text = iter([''])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_word_splitter_handles_blank_lines(self):
text = iter([u' ', u'', u'\t '])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_word_splitter_can_split(self):
text = iter(u'abc def \n GHI'.splitlines())
result = list(unigram_splitter(text, splitter=word_splitter))
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
Token(start_line=1, end_line=1, start_char=1, end_char=4, value=u'ghi'),
]
assert expected == result
def test_unigrams_word_splitter_handles_empty_iterable(self):
text = iter([])
result = list(unigram_splitter(text, splitter=word_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_empty_string(self):
text = iter([''])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_blank_lines(self):
text = iter([' ', '', '\t '])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_handles_empty_iterable(self):
text = iter([])
result = list(unigram_splitter(text, splitter=template_splitter))
assert [] == result
def test_unigrams_template_splitter_can_split(self):
text = iter(u'abc def \n GHI'.splitlines())
result = list(unigram_splitter(text, splitter=template_splitter))
assert [u'abc', u'def', u'ghi'] == [x.value for x in result]
def test_unigrams_template_splitter_can_split_templates(self):
text = u'abc def \n {{temp}} GHI'.splitlines()
result = list(unigram_splitter(text, splitter=template_splitter))
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
Token(start_line=1, end_line=1, start_char=1, end_char=3, value=u'{{'),
Token(start_line=1, end_line=1, start_char=3, end_char=7, value=u'temp'),
Token(start_line=1, end_line=1, start_char=7, end_char=9, value=u'}}'),
Token(start_line=1, end_line=1, start_char=10, end_char=13, value=u'ghi'),
]
assert expected == result
def test_position_processor(self):
tokens = [
Token(value=u'abc'),
Token(value=u'def'),
Token(value=u'temp'),
Token(value=u'ghi'),
]
expected = [
Token(value=u'abc', start=0, end=0),
Token(value=u'def', start=1, end=1),
Token(value=u'temp', start=2, end=2),
Token(value=u'ghi', start=3, end=3),
]
result = list(position_processor(tokens))
assert expected == result
def test_unigram_tokenizer(self):
inp = u'''Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.'''
tst = list(unigram_tokenizer(inp.splitlines(True)))
assert 39 == len(tst)
expected = u'''redistribution and use in source and binary forms with or
without modification are permitted provided that the following
conditions are met redistributions of source code must retain the above
copyright notice this list of conditions and the following
disclaimer'''.split()
result = [t.value for t in tst]
assert expected == result
class TestTemplates(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def template_parsing(self, lines):
if isinstance(lines, basestring):
lines = lines.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
return list(template_processor(unigrams))
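    # Quick recap of the template syntax exercised below (illustrative, and
    # inferred from the expectations rather than the implementation):
    # '{{N some words}}' drops the templated words and records a gap of N
    # tokens on the preceding token; '{{...}}' without a number records
    # DEFAULT_GAP, e.g.:
    #     self.template_parsing(u'ab{{10 x}}cd')[0].gap == 10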
def test_process_template_handles_empty_templates_using_default_gap(self):
lines = [u'ab{{}}cd']
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=6, end_char=8, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_gap(self):
lines = u'ab{{10 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=21, end_char=23, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_raise_invalid_gap_exception(self):
lines = u'ab{{151 nexb Company}}cd'
self.assertRaises(InvalidGapError, self.template_parsing, lines)
def test_process_template_recognizes_template_with_maxgap(self):
lines = u'ab{{150 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=150),
Token(start_line=0, end_line=0, start_char=22, end_char=24, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap(self):
lines = u'ab{{10}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=8, end_char=10, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap_and_spaces(self):
lines = u'ab{{ 10 }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=16, end_char=18, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified(self):
lines = u'ab{{nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified_ignoring_spaces(self):
lines = u'ab{{ \sdsdnexb Companysd }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=28, end_char=30, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap(self):
lines = u'ab{{nexb Company}}cd {{second}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=31, end_char=33, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap_and_custom_gaps(self):
lines = u'ab{{nexb Company}}cd{{12 second}}ef{{12 second}}gh'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=12),
Token(start_line=0, end_line=0, start_char=33, end_char=35, value=u'ef', gap=12),
Token(start_line=0, end_line=0, start_char=48, end_char=50, value=u'gh', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates(self):
lines = u'ab{{c}}d}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=7, end_char=8, value=u'd', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=10, end_char=12, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_empty_lines(self):
lines = u'\n\n'
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_handles_None(self):
lines = None
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_can_parse_simple_line(self):
lines = u'Licensed by {{12 nexB}} to you '
expected = u'licensed by to you'
result = u' '.join(x.value for x in self.template_parsing(lines))
assert expected == result
def test_process_template_does_not_throw_exception_for_illegal_pystache_templates(self):
lines = u'''Permission to use, copy, modify, and {{ /or : the
lines exist without or }} distribute this software...'''
self.template_parsing(lines)
def test_process_template_handles_unicode_text_correctly(self):
expected = [
Token(start_line=0, end_line=0, start_char=1, end_char=4, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=5, end_char=10, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=11, end_char=19, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=21, end_char=24, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=25, end_char=31, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=32, end_char=34, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=35, end_char=40, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=41, end_char=44, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=56, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=57, end_char=60, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=61, end_char=64, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.atxt')
with codecs.open(test_file, encoding='utf-8') as test:
lines = test.read().splitlines()
result = list(self.template_parsing(lines))
assert expected == result
def test_process_template_can_handle_long_text(self):
expected = [
Token(start_line=0, end_line=0, start_char=14, end_char=17, value=u'ist', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=23, value=u'freie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=24, end_char=32, value=u'software', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=34, end_char=37, value=u'sie', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=38, end_char=44, value=u'k\xf6nnen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=45, end_char=47, value=u'es', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=48, end_char=53, value=u'unter', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=54, end_char=57, value=u'den', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=58, end_char=69, value=u'bedingungen', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=70, end_char=73, value=u'der', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=74, end_char=77, value=u'gnu', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
]
test_file = self.get_test_loc('analysis/unicode/12180.txt')
with codecs.open(test_file, encoding='utf-8') as test:
result = list(self.template_parsing(test))
assert expected == result
def test_process_template_does_not_crash_on_unicode_rules_text_1(self):
test_file = self.get_test_loc('analysis/unicode/12290.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_2(self):
test_file = self.get_test_loc('analysis/unicode/12319.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_3(self):
test_file = self.get_test_loc('analysis/unicode/12405.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_4(self):
test_file = self.get_test_loc('analysis/unicode/12407.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_does_not_crash_on_unicode_rules_text_5(self):
test_file = self.get_test_loc('analysis/unicode/12420.txt')
with codecs.open(test_file, encoding='utf-8') as test:
list(self.template_parsing(test))
def test_process_template_detects_non_well_formed_templatized_regions(self):
lines = u'abcd{{ef'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates_2(self):
lines = u'}}{{{{abc}}ddd}}{{'
self.assertRaises(UnbalancedTemplateError, self.template_parsing, lines)
def test_process_template_can_parse_ill_formed_template(self):
tf = self.get_test_loc('analysis/ill_formed_template/text.txt')
lines = unicode_text_lines(tf)
result = list(self.template_parsing(lines))
expected_gaps = [30, 10, 60, 70, 20]
result_gaps = [x.gap for x in result if x.gap]
assert expected_gaps == result_gaps
et = self.get_test_loc('analysis/ill_formed_template/expected_grams.json')
result_dicts = [t._asdict() for t in result]
regen = False
if regen:
with codecs.open(et, 'w', encoding='utf-8') as out:
json.dump(result_dicts, out, indent=2)
with codecs.open(et, encoding='utf-8') as inp:
expected = json.load(inp)
assert expected == result_dicts
def test_token_positions_are_kept_same_for_unigrams_and_ngrams_with_template(self):
lines = u'some text is some text {{ }} in all cases\n \n'
unigrams = unigram_tokenizer(iter([lines]), template=False)
tunigrams = unigram_tokenizer(iter([lines]), template=True)
ngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=False)
tngrams = ngram_tokenizer(iter([lines]), ngram_len=3, template=True)
expected_start_end = (0, 7,)
        def check_start_end(tokens):
            tokens = list(tokens)
            result = (tokens[0].start, tokens[-1].end,)
            assert expected_start_end == result
check_start_end(unigrams)
check_start_end(tunigrams)
check_start_end(ngrams)
check_start_end(tngrams)
def test_plain_unigrams_from_templated_unigrams(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
result = list(template_processor(unigrams))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=0, value=u'old'),
Token(start=0, start_line=0, start_char=7, end_line=0, end_char=13, end=0, gap=3, value=u'tailor'),
Token(start=0, start_line=0, start_char=29, end_line=0, end_char=31, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=32, end_line=0, end_char=37, end=0, gap=0, value=u'quite'),
Token(start=0, start_line=0, start_char=38, end_line=0, end_char=42, end=0, gap=0, value=u'very'),
Token(start=0, start_line=0, start_char=43, end_line=0, end_char=47, end=0, gap=0, value=u'rich'),
]
assert expected == result
class TestLegacyNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_plain_ngrams_processor(self):
from collections import deque
def ngram_processor(items, ngram_len):
"""
Given a sequence or iterable of arbitrary items, return an iterator of
item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable
items.
For example::
>>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
"""
ngram = deque()
current_len = 0
for item in items:
if current_len == ngram_len:
yield tuple(ngram)
ngram.popleft()
current_len -= 1
ngram.append(item)
current_len += 1
yield tuple(ngram)
text = (
u'''/*COMMENT
COMMENT COMMENT
- COMMENT
*/
public static boolean activateSearchResultView() {
String defaultPerspectiveId= SearchUI.getDefaultPerspectiveId();
if (defaultPerspectiveId != null) {
IWorkbenchWindow window= SearchPlugin.getActiveWorkbenchWindow();
if (window != null && window.getShell() != null && !window.getShell().isDisposed()) {
try {
PlatformUI.getWorkbench().showPerspective(defaultPerspectiveId, window);
} catch (WorkbenchException ex) {
// show view in current perspective
}
}
}''')
expected = [
(u'comment', u'comment', u'comment', u'comment', u'public', u'static'),
(u'comment', u'comment', u'comment', u'public', u'static', u'boolean'),
(u'comment', u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview'),
(u'comment', u'public', u'static', u'boolean',
u'activatesearchresultview', u'string'),
(u'public', u'static', u'boolean', u'activatesearchresultview',
u'string', u'defaultperspectiveid'),
(u'static', u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui'),
(u'boolean', u'activatesearchresultview', u'string',
u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid'),
(u'activatesearchresultview', u'string', u'defaultperspectiveid',
u'searchui', u'getdefaultperspectiveid', u'if'),
(u'string', u'defaultperspectiveid', u'searchui',
u'getdefaultperspectiveid', u'if', u'defaultperspectiveid'),
(u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid',
u'if', u'defaultperspectiveid', u'null'),
(u'searchui', u'getdefaultperspectiveid', u'if',
u'defaultperspectiveid', u'null', u'iworkbenchwindow'),
(u'getdefaultperspectiveid', u'if', u'defaultperspectiveid', u'null',
u'iworkbenchwindow', u'window'),
(u'if', u'defaultperspectiveid', u'null', u'iworkbenchwindow',
u'window', u'searchplugin'),
(u'defaultperspectiveid', u'null', u'iworkbenchwindow', u'window',
u'searchplugin', u'getactiveworkbenchwindow'),
(u'null', u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if'),
(u'iworkbenchwindow', u'window', u'searchplugin',
u'getactiveworkbenchwindow', u'if', u'window'),
(u'window', u'searchplugin', u'getactiveworkbenchwindow', u'if',
u'window', u'null'),
(u'searchplugin', u'getactiveworkbenchwindow', u'if', u'window',
u'null', u'window'),
(u'getactiveworkbenchwindow', u'if', u'window', u'null', u'window',
u'getshell'),
(u'if', u'window', u'null', u'window', u'getshell', u'null'),
(u'window', u'null', u'window', u'getshell', u'null', u'window'),
(u'null', u'window', u'getshell', u'null', u'window', u'getshell'),
(u'window', u'getshell', u'null', u'window', u'getshell', u'isdisposed'),
(u'getshell', u'null', u'window', u'getshell', u'isdisposed', u'try'),
(u'null', u'window', u'getshell', u'isdisposed', u'try', u'platformui'),
(u'window', u'getshell', u'isdisposed', u'try', u'platformui',
u'getworkbench'),
(u'getshell', u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective'),
(u'isdisposed', u'try', u'platformui', u'getworkbench',
u'showperspective', u'defaultperspectiveid'),
(u'try', u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window'),
(u'platformui', u'getworkbench', u'showperspective',
u'defaultperspectiveid', u'window', u'catch'),
(u'getworkbench', u'showperspective', u'defaultperspectiveid',
u'window', u'catch', u'workbenchexception'),
(u'showperspective', u'defaultperspectiveid', u'window', u'catch',
u'workbenchexception', u'ex'),
(u'defaultperspectiveid', u'window', u'catch', u'workbenchexception',
u'ex', u'show'),
(u'window', u'catch', u'workbenchexception', u'ex', u'show', u'view'),
(u'catch', u'workbenchexception', u'ex', u'show', u'view', u'in'),
(u'workbenchexception', u'ex', u'show', u'view', u'in', u'current'),
(u'ex', u'show', u'view', u'in', u'current', u'perspective'),
]
unigrams = (x.value for x
in unigram_splitter(text.splitlines()))
result = list(ngram_processor(unigrams, ngram_len=6))
assert expected == result
class TestNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_tokens_ngram_processor_bigrams_from_unigrams(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=2))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_n2_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=2))
assert expected == result
def test_tokens_ngram_processor_n3_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=3))
assert expected == result
def test_tokens_ngram_processor_n4_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=4))
assert expected == result
def test_tokens_ngram_processor_n10_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=10))
assert expected == result
def test_tokens_ngram_processor_n1_with_2_tokens(self):
text = u'this is'
unigrams = list(unigram_splitter(text.splitlines()))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),),
]
result = list(tokens_ngram_processor(iter(unigrams), ngram_len=1))
assert expected == result
def test_tokens_ngram_processor_3grams_from_unigrams_on_multilines(self):
text = u'this is some text \n on multiple lines'
unigrams = unigram_splitter(text.splitlines())
result = list(tokens_ngram_processor(unigrams, ngram_len=3))
expected = [
(Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
(Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
(Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
(Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
(Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines'))
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_basic(self):
lines = [u'My old {{3 John Doe}} is rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=3, value=u'old'),
),
(Token(start=0, start_line=0, start_char=22, end_line=0, end_char=24, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=25, end_line=0, end_char=29, end=0, gap=0, value=u'rich'),
)
]
assert expected == result
def test_tokens_ngram_processor_with_template_gaps_merged(self):
lines = [u'My old tailor {{3 John Doe}} is quite very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start_line=0, start_char=0, end_line=0, end_char=13, gap=ngram_len, value=(u'my', u'old', u'tailor')),
Token(start_line=0, start_char=29, end_line=0, end_char=42, gap=0, value=(u'is', u'quite', u'very')),
Token(start_line=0, start_char=32, end_line=0, end_char=47, gap=0, value=(u'quite', u'very', u'rich')),
]
assert expected == result
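    # A sketch of what ngram_to_token appears to do, inferred from the
    # expectations above (not the actual implementation): each tuple of
    # Tokens collapses into a single Token spanning first-to-last, e.g.
    #     first, last = ngram[0], ngram[-1]
    #     Token(start_line=first.start_line, start_char=first.start_char,
    #           end_line=last.end_line, end_char=last.end_char,
    #           gap=last.gap, value=tuple(t.value for t in ngram))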
def test_tokens_ngram_processor_with_gaps_merged_short_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=57, end=0, gap=0, value=(u'very', u'rich'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_short_and_long_grams(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich really rich']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
Token(start=0, start_line=0, start_char=48, end_line=0, end_char=64, end=0, gap=0, value=(u'very', u'rich', u'really')),
Token(start=0, start_line=0, start_char=53, end_line=0, end_char=69, end=0, gap=0, value=(u'rich', u'really', u'rich'))
]
assert expected == result
def test_ngram_to_token_processor_with_gaps_at_the_end(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
ngrams_tuples = tokens_ngram_processor(templated, ngram_len=ngram_len)
result = list(ngram_to_token(ngrams_tuples))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_at_the_end_does_yield_empty_tuples(self):
lines = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
ngram_len = 3
result = list(tokens_ngram_processor(templated, ngram_len=ngram_len))
assert (None, None, None,) != result[-1]
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=u'my'),),
(Token(start=0, start_line=0, start_char=20, end_line=0, end_char=22, end=0, gap=0, value=u'is'),
Token(start=0, start_line=0, start_char=23, end_line=0, end_char=28, end=0, gap=5, value=u'quite'),
)
]
assert expected == result
def test_ngrams_tokenizer_does_not_yield_4grams_for_3grams(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the names {{}}of its contributors may
materials provided with the distribution.'''.splitlines()
result = list(ngram_tokenizer(iter(lines), ngram_len=3, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=16, end=2, gap=0, value=(u'neither', u'the', u'name')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=19, end=3, gap=10, value=(u'the', u'name', u'of')),
Token(start=4, start_line=0, start_char=44, end_line=0, end_char=47, end=4, gap=5, value=(u'nor',)),
Token(start=5, start_line=0, start_char=52, end_line=0, end_char=61, end=6, gap=5, value=(u'the', u'names')),
Token(start=7, start_line=0, start_char=66, end_line=0, end_char=85, end=9, gap=0, value=(u'of', u'its', u'contributors')),
Token(start=8, start_line=0, start_char=69, end_line=0, end_char=89, end=10, gap=0, value=(u'its', u'contributors', u'may')),
Token(start=9, start_line=0, start_char=73, end_line=1, end_char=25, end=11, gap=0, value=(u'contributors', u'may', u'materials')),
Token(start=10, start_line=0, start_char=86, end_line=1, end_char=34, end=12, gap=0, value=(u'may', u'materials', u'provided')),
Token(start=11, start_line=1, start_char=16, end_line=1, end_char=39, end=13, gap=0, value=(u'materials', u'provided', u'with')),
Token(start=12, start_line=1, start_char=26, end_line=1, end_char=43, end=14, gap=0, value=(u'provided', u'with', u'the')),
Token(start=13, start_line=1, start_char=35, end_line=1, end_char=56, end=15, gap=0, value=(u'with', u'the', u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_3grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=3))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name')),
(Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_4grams_when_requested(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
names {{}}of its contributors may materials provided with
the distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
(Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_tokens_ngram_processor_with_gaps_can_handle_contiguous_template_regions(self):
lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}
{{6 }}of its contributors may materials provided with the
distribution.'''.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
templated = template_processor(unigrams)
result = list(tokens_ngram_processor(templated, ngram_len=4))
expected = [
(Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
(Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
(Token(start=0, start_line=1, start_char=25, end_line=1, end_char=27, end=0, gap=0, value=u'of'),
Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may')),
(Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials')),
(Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided')),
(Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with')),
(Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the')),
(Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the'),
Token(start=0, start_line=2, start_char=19, end_line=2, end_char=31, end=0, gap=0, value=u'distribution'))
]
assert expected == result
def test_ngram_tokenizer_can_handle_gaps_at_end_of_text(self):
lines = [u'Neither the name of {{10 the ORGANIZATION}} ']
ngram_len = 2
result = list(ngram_tokenizer(lines, ngram_len, template=True))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'neither', u'the')),
Token(start=1, start_line=0, start_char=8, end_line=0, end_char=16, end=2, gap=0, value=(u'the', u'name')),
Token(start=2, start_line=0, start_char=12, end_line=0, end_char=19, end=3, gap=10, value=(u'name', u'of'))
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n3(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n1(self):
lines = [u'X11 License']
ngram_len = 1
result = list(ngram_tokenizer(lines, ngram_len))
expected = [
Token(start=0, start_line=0, start_char=0, end_line=0, end_char=3, end=0, gap=0, value=(u'x11',)),
Token(start=1, start_line=0, start_char=4, end_line=0, end_char=11, end=1, gap=0, value=(u'license',)),
]
assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_template(self):
lines = [u'X11 License']
ngram_len = 3
result = list(ngram_tokenizer(lines, ngram_len, template=True))
assert lines == list(doc_subset(lines, result[0]))
expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
assert expected == result
def test_unicode_text_lines_handles_weird_xml_encodings(self):
test_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom')
result = list(unicode_text_lines(test_file))
expected_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom.expected')
with open(expected_file, 'rb') as tf:
expected = cPickle.load(tf)
assert expected == result
class TestMultigrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
# TODO: add more tests beyond the simple doctests that exist in the code
@skipIf(True, 'Performance tests only')
class TestAnalysisPerformance(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_splitter_perf(self):
test_file = self.get_test_loc('perf/test.txt')
text = open(test_file).read() * 100
utext = unicode(text)
setup1 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws = re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts = re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
def check_perf(setup):
from timeit import timeit
stmt = 'list(w for w in %s(%s))'
print()
print('Unicode template')
print(timeit(stmt % ('unicode_ts', 'utext'), setup=setup, number=1000))
print('Plain template')
print(timeit(stmt % ('plain_ts', 'text'), setup=setup, number=1000))
print('Unicode words')
print(timeit(stmt % ('unicode_ws', 'utext'), setup=setup, number=1000))
print('Plain words')
print(timeit(stmt % ('plain_ws', 'text'), setup=setup, number=1000))
print('Plain split')
print(timeit('text.split()', setup=setup, number=1000))
print('Unicode split')
print(timeit('utext.split()', setup=setup, number=1000))
print('Line split')
print(timeit('text.splitlines(False)', setup=setup, number=1000))
print('Line split with ends')
print(timeit('text.splitlines(True)', setup=setup, number=1000))
check_perf(setup=setup1)
setup2 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws = re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts = re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
check_perf(setup=setup2)
|
__init__
|
Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
    use_visualizer: Render the simulation using the debugging visualizer
        if True.
|
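A minimal usage sketch for the constructor documented above. The import path, asset directory, and step count below are assumptions, not taken from this record. One detail grounded in the code itself: the body and constraint registries are only created in reset(), so reset() must be called before add_body() or step().
# Minimal usage sketch; the module path is an assumption.
from robovat.simulation.simulator import Simulator

sim = Simulator(
    assets_dir='./assets',            # relative body filenames are joined onto this
    physics_backend='BulletPhysics',  # resolved via getattr on robovat.simulation.physics
    time_step=1e-3,
    gravity=[0, 0, -9.8],
    use_visualizer=False)
sim.reset()   # creates the (initially empty) body and constraint registries
sim.start()
for _ in range(100):
    sim.step()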
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
# MASKED: __init__ function (lines 23-51)
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
            name: A name used to reference the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to remove.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
            max_steps: Maximum number of steps to wait for the objects to
                become stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging visualizer
                if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
| 23
| 51
|
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging visualizer
                if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
            name: A name used to reference the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to remove.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
            max_steps: Maximum number of steps to wait for the objects to
                become stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
receive_robot_commands
|
Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
|
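A sketch of the dispatch performed by receive_robot_commands: the component is looked up by name, and the method named by command_type is resolved with getattr and called with the stored keyword arguments. The real RobotCommand class is not shown in this record, so a namedtuple stands in for the three attributes the method reads; the method name 'set_pose' is illustrative only.
import collections

# Stand-in for the real RobotCommand class (only the three attributes
# read by receive_robot_commands are modeled).
RobotCommand = collections.namedtuple(
    'RobotCommand', ['component', 'command_type', 'arguments'])

cmd = RobotCommand(
    component='gripper',      # key into the simulator's body registry
    command_type='set_pose',  # illustrative method name, resolved via getattr
    arguments={'pose': [[0.0, 0.0, 0.5], [0.0, 0.0, 0.0]]})
# sim.receive_robot_commands(cmd, component_type='body') then amounts to:
#     getattr(sim.bodies['gripper'], cmd.command_type)(**cmd.arguments)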
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging visualizer
                if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
            name: A name used to reference the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to remove.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
# MASKED: receive_robot_commands function (lines 226-244)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
            max_steps: Maximum number of steps to wait for the objects to
                become stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
| 226
| 244
|
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging visualizer
                if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
            name: A name used to reference the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to remove.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
            max_steps: Maximum number of steps to wait for the objects to
                become stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
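A standalone restatement of the stability test implemented by check_stable above: a body counts as stable when both velocity norms are under their thresholds, and a None threshold disables that check. The velocity attributes mirror the Body interface used in the code, and the default thresholds are the ones wait_until_stable passes; this is a sketch, not part of the original file.
import numpy as np

def is_stable(body, linear_threshold=0.005, angular_threshold=0.005):
    # A None threshold disables that check, exactly as in check_stable.
    linear_ok = (linear_threshold is None or
                 np.linalg.norm(body.linear_velocity) < linear_threshold)
    angular_ok = (angular_threshold is None or
                  np.linalg.norm(body.angular_velocity) < angular_threshold)
    return linear_ok and angular_ok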
check_contact
|
Check if two entities are in contact.
Args:
    entity_a: The first entity, or a list of entities.
    entity_b: The second entity, or a list of entities; None matches any
        entity.
Returns:
    True if any pair is in contact, False otherwise.
|
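The scalar-or-sequence handling inside check_contact, restated as a standalone sketch. The physics object and the entities' uid attribute mirror what the method already uses; nothing here is new API.
def _as_list(value):
    # check_contact wraps bare entities in a single-element list.
    return list(value) if isinstance(value, (list, tuple)) else [value]

def any_contact(physics, entity_a, entity_b=None):
    # True as soon as any (a, b) pair reports contact points; a None
    # second entity matches contacts against any other entity.
    for a in _as_list(entity_a):
        for b in _as_list(entity_b):
            b_uid = None if b is None else b.uid
            if physics.get_contact_points(a.uid, b_uid):
                return True
    return False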
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging visualizer
                if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
            name: A name used to reference the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
# MASKED: check_contact function (lines 246-287)
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
            body: An instance of Body.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
        # Return as soon as any pair of entities is in contact.
        for a in entities_a:
            for b in entities_b:
                if _check_contact(a, b):
                    return True
        return False
| 246
| 287
|
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging
                visualizer if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
        # Return as soon as any pair of entities is in contact.
        for a in entities_a:
            for b in entities_b:
                if _check_contact(a, b):
                    return True
        return False
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
            body: An instance of Body.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
check_stable
|
Check if the loaded object is stable.
Args:
body: An instance of Body.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
|
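A short sketch of the threshold semantics of check_stable, reusing the hypothetical sim and cube from the sketch above. A threshold of None disables that velocity check, so passing None for both thresholds always reports the body as stable.

# Require the linear speed to stay below 5 mm/s but ignore rotation.
is_stable = sim.check_stable(
    cube,
    linear_velocity_threshold=0.005,
    angular_velocity_threshold=None)  # None skips the angular check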
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging
                visualizer if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
        # Return as soon as any pair of entities is in contact.
        for a in entities_a:
            for b in entities_b:
                if _check_contact(a, b):
                    return True
        return False
# MASKED: check_stable function (lines 289-323)
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
            body: An instance of Body.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
| 289
| 323
|
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
use_visualizer: Render the simulation use the debugging visualizer
if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
not absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
body: An instance of Body.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
while(1):
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
pose: The pose to be plot.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
wait_until_stable
|
Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
|
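A settling sketch for wait_until_stable, again reusing the hypothetical sim from above. The loop stops once min_stable_steps stable checks have accumulated or the max_steps cap is reached, so the call always terminates even if a body never settles.

# Drop a few hypothetical cubes and let the scene settle before acting.
blocks = [sim.add_body('cube/cube.urdf', name='cube_%d' % i)  # assumed asset
          for i in range(3)]
sim.wait_until_stable(
    blocks,
    linear_velocity_threshold=0.005,
    angular_velocity_threshold=0.005,
    max_steps=2000)  # hard cap on simulation steps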
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging
                visualizer if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
        # Return as soon as any pair of entities is in contact.
        for a in entities_a:
            for b in entities_b:
                if _check_contact(a, b):
                    return True
        return False
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
            body: An instance of Body.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
# MASKED: wait_until_stable function (lines 325-376)
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
| 325
| 376
|
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
use_visualizer: Render the simulation use the debugging visualizer
if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
not absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
body: An instance of Body.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
while(1):
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
pose: The pose to be plot.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
plot_pose
|
Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
|
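A visualization sketch for plot_pose and plot_line under stated assumptions: it presumes the simulator was created with use_visualizer=True, and the pose values are illustrative. The [[position], [euler_angles]] form matches the default pose used by add_body in this file.

from robovat.math.pose import Pose

grasp_pose = Pose([[0.4, 0.0, 0.2], [0, 0, 0]])  # hypothetical grasp frame
sim.plot_pose(grasp_pose, axis_length=0.1, text='grasp')
sim.plot_line([0, 0, 0], grasp_pose.position, line_color=[1, 0, 1])
sim.clear_visualization()  # remove all debug items when done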
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging
                visualizer if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
            max_force: Maximum force the constraint can apply.
            max_linear_velocity: Maximum linear velocity.
            max_angular_velocity: Maximum angular velocity.
            is_controllable: If True, the constraint can apply motor controls.
            name: Used as a reference of the constraint in this Simulator
                instance.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
            max_steps: Maximum number of steps to wait for the objects to
                become stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
# MASKED: plot_pose function (lines 378-424)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
| 378
| 424
|
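A short sketch (not from the source) of the axis-endpoint math in plot_pose:
assuming pose.matrix3 is the 3x3 rotation matrix R, as the code implies,
np.dot(v, R.T) equals R @ v, so each endpoint is the origin plus the rotated
axis vector.

# Verifies the row-vector/column-vector identity used by plot_pose.
import numpy as np
theta = np.pi / 2  # a 90-degree yaw
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0, 0.0, 1.0]])
v = np.array([1.0, 0.0, 0.0])  # unit x-axis; scaled by axis_length in plot_pose
assert np.allclose(np.dot(v, R.T), R @ v)  # both yield [0, 1, 0]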
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: Render the simulation using the debugging visualizer
if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
                not an absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
            max_force: Maximum force the constraint can apply.
            max_linear_velocity: Maximum linear velocity.
            max_angular_velocity: Maximum angular velocity.
            is_controllable: If True, the constraint can apply motor controls.
            name: Used as a reference of the constraint in this Simulator
                instance.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
            max_steps: Maximum number of steps to wait for the objects to
                become stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
|
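An end-to-end usage sketch for the Simulator above (not from the source);
the URDF filenames are placeholder assets and the poses are illustrative.

# Hypothetical workflow: load a static plane, drop a box, wait for rest.
sim = Simulator(assets_dir='./assets', use_visualizer=True)
sim.reset()
sim.start()
plane = sim.add_body('plane.urdf', is_static=True, name='plane')
box = sim.add_body('box.urdf', pose=[[0, 0, 0.5], [0, 0, 0]], name='box')
sim.wait_until_stable(box)
print(sim.check_contact(box, plane))  # True once the box has settled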
parse_dsn
|
Parse connection string into a dictionary of keywords and values.
Connection string format:
vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
|
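An illustrative DSN for the format above (host, user, and database are
placeholders):

# parse_dsn('vertica://dbadmin:secret@db.example.com:5433/reports'
#           '?connection_timeout=5.0&ssl=on')
# would yield:
# {'host': 'db.example.com', 'port': 5433, 'user': 'dbadmin',
#  'password': 'secret', 'database': 'reports',
#  'connection_timeout': 5.0, 'ssl': True}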
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import base64
import logging
import socket
import ssl
import getpass
import uuid
from struct import unpack
from collections import deque, namedtuple
import random
# noinspection PyCompatibility,PyUnresolvedReferences
from six import raise_from, string_types, integer_types, PY2
if PY2:
from urlparse import urlparse, parse_qs
else:
from urllib.parse import urlparse, parse_qs
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Dict, Literal, Optional, Type, Union
from typing_extensions import Self
import vertica_python
from .. import errors
from ..vertica import messages
from ..vertica.cursor import Cursor
from ..vertica.messages.message import BackendMessage, FrontendMessage
from ..vertica.messages.frontend_messages import CancelRequest
from ..vertica.log import VerticaLogging
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 5433
DEFAULT_PASSWORD = ''
DEFAULT_AUTOCOMMIT = False
DEFAULT_BACKUP_SERVER_NODE = []
DEFAULT_KRB_SERVICE_NAME = 'vertica'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_PATH = 'vertica_python.log'
try:
DEFAULT_USER = getpass.getuser()
except Exception as e:
DEFAULT_USER = None
print("WARN: Cannot get the login user name: {}".format(str(e)))
def connect(**kwargs):
# type: (Any) -> Connection
"""Opens a new connection to a Vertica database."""
return Connection(kwargs)
# MASKED: parse_dsn function (lines 91-134)
_AddressEntry = namedtuple('_AddressEntry', ['host', 'resolved', 'data'])
class _AddressList(object):
def __init__(self, host, port, backup_nodes, logger):
"""Creates a new deque with the primary host first, followed by any backup hosts"""
self._logger = logger
# Items in address_deque are _AddressEntry values.
# host is the original hostname/ip, used by SSL option check_hostname
# - when resolved is False, data is port
# - when resolved is True, data is the 5-tuple from socket.getaddrinfo
        # This allows for lazy resolution. See peek() for more.
self.address_deque = deque()
# load primary host into address_deque
self._append(host, port)
# load backup nodes into address_deque
if not isinstance(backup_nodes, list):
err_msg = 'Connection option "backup_server_node" must be a list'
self._logger.error(err_msg)
raise TypeError(err_msg)
# Each item in backup_nodes should be either
# a host name or IP address string (using default port) or
# a (host, port) tuple
for node in backup_nodes:
if isinstance(node, string_types):
self._append(node, DEFAULT_PORT)
elif isinstance(node, tuple) and len(node) == 2:
self._append(node[0], node[1])
else:
err_msg = ('Each item of connection option "backup_server_node"'
' must be a host string or a (host, port) tuple')
self._logger.error(err_msg)
raise TypeError(err_msg)
self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
def _append(self, host, port):
if not isinstance(host, string_types):
err_msg = 'Host must be a string: invalid value: {0}'.format(host)
self._logger.error(err_msg)
raise TypeError(err_msg)
if not isinstance(port, (string_types, integer_types)):
err_msg = 'Port must be an integer or a string: invalid value: {0}'.format(port)
self._logger.error(err_msg)
raise TypeError(err_msg)
elif isinstance(port, string_types):
try:
port = int(port)
except ValueError as e:
                err_msg = 'Port "{0}" is not a valid integer: {1}'.format(port, e)
self._logger.error(err_msg)
raise ValueError(err_msg)
if port < 0 or port > 65535:
err_msg = 'Invalid port number: {0}'.format(port)
self._logger.error(err_msg)
raise ValueError(err_msg)
self.address_deque.append(_AddressEntry(host=host, resolved=False, data=port))
def push(self, host, port):
self.address_deque.appendleft(_AddressEntry(host=host, resolved=False, data=port))
def pop(self):
self.address_deque.popleft()
def peek(self):
# do lazy DNS resolution, returning the leftmost socket.getaddrinfo result
if len(self.address_deque) == 0:
return None
while len(self.address_deque) > 0:
self._logger.debug('Peek at address list: {0}'.format(list(self.address_deque)))
entry = self.address_deque[0]
if entry.resolved:
# return a resolved sockaddrinfo
return entry.data
else:
# DNS resolve a single host name to multiple IP addresses
self.pop()
# keep host and port info for adding address entry to deque once it has been resolved
host, port = entry.host, entry.data
try:
resolved_hosts = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
except Exception as e:
self._logger.warning('Error resolving host "{0}" on port {1}: {2}'.format(host, port, e))
continue
# add resolved addrinfo (AF_INET and AF_INET6 only) to deque
random.shuffle(resolved_hosts)
for addrinfo in resolved_hosts:
if addrinfo[0] in (socket.AF_INET, socket.AF_INET6):
self.address_deque.appendleft(_AddressEntry(
host=host, resolved=True, data=addrinfo))
return None
def peek_host(self):
# returning the leftmost host result
self._logger.debug('Peek host at address list: {0}'.format(list(self.address_deque)))
if len(self.address_deque) == 0:
return None
return self.address_deque[0].host
def _generate_session_label():
return '{type}-{version}-{id}'.format(
type='vertica-python',
version=vertica_python.__version__,
id=uuid.uuid1()
)
class Connection(object):
def __init__(self, options=None):
# type: (Optional[Dict[str, Any]]) -> None
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
options = options or {}
self.options = parse_dsn(options['dsn']) if 'dsn' in options else {}
self.options.update({key: value for key, value in options.items() \
if key == 'log_path' or (key != 'dsn' and value is not None)})
# Set up connection logger
logger_name = 'vertica_{0}_{1}'.format(id(self), str(uuid.uuid4())) # must be a unique value
self._logger = logging.getLogger(logger_name)
if 'log_level' not in self.options and 'log_path' not in self.options:
# logger is disabled by default
self._logger.disabled = True
else:
self.options.setdefault('log_level', DEFAULT_LOG_LEVEL)
self.options.setdefault('log_path', DEFAULT_LOG_PATH)
VerticaLogging.setup_logging(logger_name, self.options['log_path'],
self.options['log_level'], id(self))
self.options.setdefault('host', DEFAULT_HOST)
self.options.setdefault('port', DEFAULT_PORT)
if 'user' not in self.options:
if DEFAULT_USER:
self.options['user'] = DEFAULT_USER
else:
msg = 'Connection option "user" is required'
self._logger.error(msg)
raise KeyError(msg)
self.options.setdefault('database', self.options['user'])
self.options.setdefault('password', DEFAULT_PASSWORD)
self.options.setdefault('autocommit', DEFAULT_AUTOCOMMIT)
self.options.setdefault('session_label', _generate_session_label())
self.options.setdefault('backup_server_node', DEFAULT_BACKUP_SERVER_NODE)
self.options.setdefault('kerberos_service_name', DEFAULT_KRB_SERVICE_NAME)
# Kerberos authentication hostname defaults to the host value here so
# the correct value cannot be overwritten by load balancing or failover
self.options.setdefault('kerberos_host_name', self.options['host'])
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
# we only support one cursor per connection
self.options.setdefault('unicode_error', None)
self._cursor = Cursor(self, self._logger, cursor_type=None,
unicode_error=self.options['unicode_error'])
# knob for using server-side prepared statements
self.options.setdefault('use_prepared_statements', False)
self._logger.debug('Connection prepared statements is {}'.format(
'enabled' if self.options['use_prepared_statements'] else 'disabled'))
# knob for disabling COPY LOCAL operations
self.options.setdefault('disable_copy_local', False)
self._logger.debug('COPY LOCAL operation is {}'.format(
'disabled' if self.options['disable_copy_local'] else 'enabled'))
self._logger.info('Connecting as user "{}" to database "{}" on host "{}" with port {}'.format(
self.options['user'], self.options['database'],
self.options['host'], self.options['port']))
self.startup_connection()
# Initially, for a new session, autocommit is off
if self.options['autocommit']:
self.autocommit = True
self._logger.info('Connection is ready')
#############################################
# supporting `with` statements
#############################################
def __enter__(self):
# type: () -> Self
return self
def __exit__(self, type_, value, traceback):
self.close()
#############################################
# dbapi methods
#############################################
def close(self):
self._logger.info('Close the connection')
try:
self.write(messages.Terminate())
finally:
self.close_socket()
def commit(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('COMMIT;')
def rollback(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('ROLLBACK;')
def cursor(self, cursor_type=None):
# type: (Self, Optional[Union[Literal['list', 'dict'], Type[list[Any]], Type[dict[Any, Any]]]]) -> Cursor
if self.closed():
raise errors.ConnectionError('Connection is closed')
if self._cursor.closed():
self._cursor._closed = False
# let user change type if they want?
self._cursor.cursor_type = cursor_type
return self._cursor
#############################################
# non-dbapi methods
#############################################
@property
def autocommit(self):
"""Read the connection's AUTOCOMMIT setting from cache"""
return self.parameters.get('auto_commit', 'off') == 'on'
@autocommit.setter
def autocommit(self, value):
"""Change the connection's AUTOCOMMIT setting"""
if self.autocommit is value:
return
val = 'on' if value else 'off'
cur = self.cursor()
cur.execute('SET SESSION AUTOCOMMIT TO {}'.format(val), use_prepared_statements=False)
cur.fetchall() # check for errors and update the cache
def cancel(self):
"""Cancel the current database operation. This can be called from a
different thread than the one currently executing a database operation.
"""
if self.closed():
raise errors.ConnectionError('Connection is closed')
self._logger.info('Canceling the current database operation')
# Must create a new socket connection to the server
temp_socket = self.establish_socket_connection(self.address_list)
self.write(CancelRequest(self.backend_pid, self.backend_key), temp_socket)
temp_socket.close()
self._logger.info('Cancel request issued')
def opened(self):
return (self.socket is not None
and self.backend_pid is not None
and self.transaction_status is not None)
def closed(self):
return not self.opened()
def __str__(self):
safe_options = {key: value for key, value in self.options.items() if key != 'password'}
s1 = "<Vertica.Connection:{0} parameters={1} backend_pid={2}, ".format(
id(self), self.parameters, self.backend_pid)
s2 = "backend_key={0}, transaction_status={1}, socket={2}, options={3}>".format(
self.backend_key, self.transaction_status, self.socket, safe_options)
return ''.join([s1, s2])
#############################################
# internal
#############################################
def reset_values(self):
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
def _socket(self):
if self.socket:
return self.socket
# the initial establishment of the client connection
raw_socket = self.establish_socket_connection(self.address_list)
# enable load balancing
load_balance_options = self.options.get('connection_load_balance')
self._logger.debug('Connection load balance option is {0}'.format(
'enabled' if load_balance_options else 'disabled'))
if load_balance_options:
raw_socket = self.balance_load(raw_socket)
# enable SSL
ssl_options = self.options.get('ssl')
self._logger.debug('SSL option is {0}'.format('enabled' if ssl_options else 'disabled'))
if ssl_options:
raw_socket = self.enable_ssl(raw_socket, ssl_options)
self.socket = raw_socket
return self.socket
def _socket_as_file(self):
if self.socket_as_file is None:
self.socket_as_file = self._socket().makefile('rb')
return self.socket_as_file
def create_socket(self, family):
"""Create a TCP socket object"""
raw_socket = socket.socket(family, socket.SOCK_STREAM)
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
connection_timeout = self.options.get('connection_timeout')
if connection_timeout is not None:
self._logger.debug('Set socket connection timeout: {0}'.format(connection_timeout))
raw_socket.settimeout(connection_timeout)
return raw_socket
def balance_load(self, raw_socket):
# Send load balance request and read server response
self._logger.debug('=> %s', messages.LoadBalanceRequest())
raw_socket.sendall(messages.LoadBalanceRequest().get_message())
response = raw_socket.recv(1)
if response == b'Y':
size = unpack('!I', raw_socket.recv(4))[0]
if size < 4:
err_msg = "Bad message size: {0}".format(size)
self._logger.error(err_msg)
raise errors.MessageError(err_msg)
res = BackendMessage.from_type(type_=response, data=raw_socket.recv(size - 4))
self._logger.debug('<= %s', res)
host = res.get_host()
port = res.get_port()
self._logger.info('Load balancing to host "{0}" on port {1}'.format(host, port))
peer = raw_socket.getpeername()
socket_host, socket_port = peer[0], peer[1]
if host == socket_host and port == socket_port:
self._logger.info('Already connecting to host "{0}" on port {1}. Ignore load balancing.'.format(host, port))
return raw_socket
# Push the new host onto the address list before connecting again. Note that this
# will leave the originally-specified host as the first failover possibility.
self.address_list.push(host, port)
raw_socket.close()
raw_socket = self.establish_socket_connection(self.address_list)
else:
self._logger.debug('<= LoadBalanceResponse: %s', response)
self._logger.warning("Load balancing requested but not supported by server")
return raw_socket
def enable_ssl(self, raw_socket, ssl_options):
# Send SSL request and read server response
self._logger.debug('=> %s', messages.SslRequest())
raw_socket.sendall(messages.SslRequest().get_message())
response = raw_socket.recv(1)
self._logger.debug('<= SslResponse: %s', response)
if response == b'S':
self._logger.info('Enabling SSL')
try:
if isinstance(ssl_options, ssl.SSLContext):
server_host = self.address_list.peek_host()
if server_host is None: # This should not happen
msg = 'Cannot get the connected server host while enabling SSL'
self._logger.error(msg)
raise errors.ConnectionError(msg)
raw_socket = ssl_options.wrap_socket(raw_socket, server_hostname=server_host)
else:
raw_socket = ssl.wrap_socket(raw_socket)
except ssl.CertificateError as e:
raise_from(errors.ConnectionError(str(e)), e)
except ssl.SSLError as e:
raise_from(errors.ConnectionError(str(e)), e)
else:
err_msg = "SSL requested but not supported by server"
self._logger.error(err_msg)
raise errors.SSLNotSupported(err_msg)
return raw_socket
def establish_socket_connection(self, address_list):
"""Given a list of database node addresses, establish the socket
connection to the database server. Return a connected socket object.
"""
addrinfo = address_list.peek()
raw_socket = None
last_exception = None
# Failover: loop to try all addresses
while addrinfo:
(family, socktype, proto, canonname, sockaddr) = addrinfo
last_exception = None
# _AddressList filters all addrs to AF_INET and AF_INET6, which both
# have host and port as values 0, 1 in the sockaddr tuple.
host = sockaddr[0]
port = sockaddr[1]
self._logger.info('Establishing connection to host "{0}" on port {1}'.format(host, port))
try:
raw_socket = self.create_socket(family)
raw_socket.connect(sockaddr)
break
except Exception as e:
self._logger.info('Failed to connect to host "{0}" on port {1}: {2}'.format(host, port, e))
last_exception = e
address_list.pop()
addrinfo = address_list.peek()
                if raw_socket is not None:
                    raw_socket.close()
# all of the addresses failed
if raw_socket is None or last_exception:
err_msg = 'Failed to establish a connection to the primary server or any backup address.'
self._logger.error(err_msg)
raise errors.ConnectionError(err_msg)
return raw_socket
def ssl(self):
return self.socket is not None and isinstance(self.socket, ssl.SSLSocket)
def write(self, message, vsocket=None):
if not isinstance(message, FrontendMessage):
raise TypeError("invalid message: ({0})".format(message))
if vsocket is None:
vsocket = self._socket()
self._logger.debug('=> %s', message)
try:
for data in message.fetch_message():
size = 8192 # Max msg size, consistent with how the server works
pos = 0
while pos < len(data):
sent = vsocket.send(data[pos : pos + size])
if sent == 0:
raise errors.ConnectionError("Couldn't send message: Socket connection broken")
pos += sent
except Exception as e:
self.close_socket()
self._logger.error(str(e))
if isinstance(e, IOError):
raise_from(errors.ConnectionError(str(e)), e)
else:
raise
def close_socket(self):
try:
if self.socket is not None:
self._socket().close()
if self.socket_as_file is not None:
self._socket_as_file().close()
finally:
self.reset_values()
def reset_connection(self):
self.close()
self.startup_connection()
def is_asynchronous_message(self, message):
# Check if it is an asynchronous response message
# Note: ErrorResponse is a subclass of NoticeResponse
return (isinstance(message, messages.ParameterStatus) or
(isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)))
def handle_asynchronous_message(self, message):
if isinstance(message, messages.ParameterStatus):
if message.name == 'protocol_version':
message.value = int(message.value)
self.parameters[message.name] = message.value
elif (isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)):
if getattr(self, 'notice_handler', None) is not None:
self.notice_handler(message)
else:
self._logger.warning(message.error_message())
def read_string(self):
s = bytearray()
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
s.extend(char)
return s
def read_message(self):
while True:
try:
type_ = self.read_bytes(1)
size = unpack('!I', self.read_bytes(4))[0]
if size < 4:
raise errors.MessageError("Bad message size: {0}".format(size))
if type_ == messages.WriteFile.message_id:
                    # The whole WriteFile message may not be read here.
                    # Instead, only the file name and file length are read.
# This is because the message could be too large to read all at once.
f = self.read_string()
filename = f.decode('utf-8')
file_length = unpack('!I', self.read_bytes(4))[0]
size -= 4 + len(f) + 1 + 4
if size != file_length:
raise errors.MessageError("Bad message size: {0}".format(size))
if filename == '':
# If there is no filename, then this is really RETURNREJECTED data, not a rejected file
if file_length % 8 != 0:
raise errors.MessageError("Bad RETURNREJECTED data size: {0}".format(file_length))
data = self.read_bytes(file_length)
message = messages.WriteFile(filename, file_length, data)
else:
# The rest of the message is read later with write_to_disk()
message = messages.WriteFile(filename, file_length)
else:
message = BackendMessage.from_type(type_, self.read_bytes(size - 4))
self._logger.debug('<= %s', message)
self.handle_asynchronous_message(message)
# handle transaction status
if isinstance(message, messages.ReadyForQuery):
self.transaction_status = message.transaction_status
except (SystemError, IOError) as e:
self.close_socket()
# noinspection PyTypeChecker
self._logger.error(e)
raise_from(errors.ConnectionError(str(e)), e)
if not self.is_asynchronous_message(message):
break
return message
def read_expected_message(self, expected_types, error_handler=None):
# Reads a message and does some basic error handling.
# expected_types must be a class (e.g. messages.BindComplete) or a tuple of classes
message = self.read_message()
if isinstance(message, expected_types):
return message
elif isinstance(message, messages.ErrorResponse):
if error_handler is not None:
error_handler(message)
else:
raise errors.DatabaseError(message.error_message())
else:
msg = 'Received unexpected message type: {}. '.format(type(message).__name__)
if isinstance(expected_types, tuple):
msg += 'Expected types: {}'.format(", ".join([t.__name__ for t in expected_types]))
else:
msg += 'Expected type: {}'.format(expected_types.__name__)
self._logger.error(msg)
raise errors.MessageError(msg)
def read_bytes(self, n):
if n == 1:
result = self._socket_as_file().read(1)
if not result:
raise errors.ConnectionError("Connection closed by Vertica")
return result
else:
buf = b""
to_read = n
while to_read > 0:
data = self._socket_as_file().read(to_read)
received = len(data)
if received == 0:
raise errors.ConnectionError("Connection closed by Vertica")
buf += data
to_read -= received
return buf
def send_GSS_response_and_receive_challenge(self, response):
# Send the GSS response data to the vertica server
token = base64.b64decode(response)
self.write(messages.Password(token, messages.Authentication.GSS))
# Receive the challenge from the vertica server
message = self.read_expected_message(messages.Authentication)
if message.code != messages.Authentication.GSS_CONTINUE:
msg = ('Received unexpected message type: Authentication(type={}).'
' Expected type: Authentication(type={})'.format(
message.code, messages.Authentication.GSS_CONTINUE))
self._logger.error(msg)
raise errors.MessageError(msg)
return message.auth_data
def make_GSS_authentication(self):
try:
import kerberos
except ImportError as e:
raise errors.ConnectionError("{}\nCannot make a Kerberos "
"authentication because no Kerberos package is installed. "
"Get it with 'pip install kerberos'.".format(str(e)))
# Set GSS flags
gssflag = (kerberos.GSS_C_DELEG_FLAG | kerberos.GSS_C_MUTUAL_FLAG |
kerberos.GSS_C_SEQUENCE_FLAG | kerberos.GSS_C_REPLAY_FLAG)
# Generate the GSS-style service principal name
service_principal = "{}@{}".format(self.options['kerberos_service_name'],
self.options['kerberos_host_name'])
# Initializes a context object with a service principal
self._logger.info('Initializing a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
try:
result, context = kerberos.authGSSClientInit(service_principal, gssflags=gssflag)
except kerberos.GSSError as err:
msg = "GSSAPI initialization error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
if result != kerberos.AUTH_GSS_COMPLETE:
msg = ('Failed to initialize a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
self._logger.error(msg)
raise errors.KerberosError(msg)
# Processes GSSAPI client-side steps
try:
challenge = b''
while True:
self._logger.info('Processing a single GSSAPI client-side step')
challenge = base64.b64encode(challenge).decode("utf-8")
result = kerberos.authGSSClientStep(context, challenge)
if result == kerberos.AUTH_GSS_COMPLETE:
self._logger.info('Result: GSSAPI step complete')
break
elif result == kerberos.AUTH_GSS_CONTINUE:
self._logger.info('Result: GSSAPI step continuation')
# Get the response from the last successful GSSAPI client-side step
response = kerberos.authGSSClientResponse(context)
challenge = self.send_GSS_response_and_receive_challenge(response)
else:
msg = "GSSAPI client-side step error status {}".format(result)
self._logger.error(msg)
raise errors.KerberosError(msg)
except kerberos.GSSError as err:
msg = "GSSAPI client-side step error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
def startup_connection(self):
user = self.options['user']
database = self.options['database']
session_label = self.options['session_label']
os_user_name = DEFAULT_USER if DEFAULT_USER else ''
password = self.options['password']
self.write(messages.Startup(user, database, session_label, os_user_name))
while True:
message = self.read_message()
if isinstance(message, messages.Authentication):
if message.code == messages.Authentication.OK:
self._logger.info("User {} successfully authenticated"
.format(self.options['user']))
elif message.code == messages.Authentication.CHANGE_PASSWORD:
msg = "The password for user {} has expired".format(self.options['user'])
self._logger.error(msg)
raise errors.ConnectionError(msg)
elif message.code == messages.Authentication.PASSWORD_GRACE:
self._logger.warning('The password for user {} will expire soon.'
' Please consider changing it.'.format(self.options['user']))
elif message.code == messages.Authentication.GSS:
self.make_GSS_authentication()
else:
self.write(messages.Password(password, message.code,
{'user': user,
'salt': getattr(message, 'salt', None),
'usersalt': getattr(message, 'usersalt', None)}))
elif isinstance(message, messages.BackendKeyData):
self.backend_pid = message.pid
self.backend_key = message.key
elif isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.ErrorResponse):
self._logger.error(message.error_message())
raise errors.ConnectionError(message.error_message())
else:
msg = "Received unexpected startup message: {0}".format(message)
self._logger.error(msg)
raise errors.MessageError(msg)
|
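A minimal usage sketch for the Connection class above (not from the source);
the host, credentials, and query are placeholders.

# connect() and the `with` support are defined in this module.
with connect(host='db.example.com', user='dbadmin',
             password='secret', database='reports') as conn:
    cur = conn.cursor()
    cur.execute('SELECT version()')
    print(cur.fetchall())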
def parse_dsn(dsn):
"""Parse connection string into a dictionary of keywords and values.
Connection string format:
vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
"""
url = urlparse(dsn)
if url.scheme != 'vertica':
raise ValueError("Only vertica:// scheme is supported.")
# Ignore blank/invalid values
result = {k: v for k, v in (
('host', url.hostname),
('port', url.port),
('user', url.username),
('password', url.password),
('database', url.path[1:])) if v
}
for key, values in parse_qs(url.query, keep_blank_values=True).items():
# Try to get the last non-blank value in the list of values for each key
for i in reversed(range(len(values))):
value = values[i]
if value != '':
break
if value == '' and key != 'log_path':
# blank values are to be ignored
continue
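        # backup_server_node takes a list of hosts or (host, port) tuples,
        # which a flat query string cannot express, so it is ignored here.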
elif key == 'backup_server_node':
continue
elif key in ('connection_load_balance', 'use_prepared_statements',
'disable_copy_local', 'ssl', 'autocommit'):
lower = value.lower()
if lower in ('true', 'on', '1'):
result[key] = True
elif lower in ('false', 'off', '0'):
result[key] = False
elif key == 'connection_timeout':
result[key] = float(value)
elif key == 'log_level' and value.isdigit():
result[key] = int(value)
else:
result[key] = value
return result
| 91
| 134
|
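A sketch (not from the source) of parse_dsn's query-string coercion, per the
branches above: the last non-blank value per key wins, booleans accept
true/on/1 and false/off/0, and connection_timeout is parsed as a float.

opts = parse_dsn('vertica://u@localhost/db'
                 '?autocommit=&autocommit=on&connection_timeout=2.5')
# -> {'host': 'localhost', 'user': 'u', 'database': 'db',
#     'autocommit': True, 'connection_timeout': 2.5}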
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import base64
import logging
import socket
import ssl
import getpass
import uuid
from struct import unpack
from collections import deque, namedtuple
import random
# noinspection PyCompatibility,PyUnresolvedReferences
from six import raise_from, string_types, integer_types, PY2
if PY2:
from urlparse import urlparse, parse_qs
else:
from urllib.parse import urlparse, parse_qs
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Dict, Literal, Optional, Type, Union
from typing_extensions import Self
import vertica_python
from .. import errors
from ..vertica import messages
from ..vertica.cursor import Cursor
from ..vertica.messages.message import BackendMessage, FrontendMessage
from ..vertica.messages.frontend_messages import CancelRequest
from ..vertica.log import VerticaLogging
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 5433
DEFAULT_PASSWORD = ''
DEFAULT_AUTOCOMMIT = False
DEFAULT_BACKUP_SERVER_NODE = []
DEFAULT_KRB_SERVICE_NAME = 'vertica'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_PATH = 'vertica_python.log'
try:
DEFAULT_USER = getpass.getuser()
except Exception as e:
DEFAULT_USER = None
print("WARN: Cannot get the login user name: {}".format(str(e)))
def connect(**kwargs):
# type: (Any) -> Connection
"""Opens a new connection to a Vertica database."""
return Connection(kwargs)
def parse_dsn(dsn):
"""Parse connection string into a dictionary of keywords and values.
Connection string format:
vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
"""
url = urlparse(dsn)
if url.scheme != 'vertica':
raise ValueError("Only vertica:// scheme is supported.")
# Ignore blank/invalid values
result = {k: v for k, v in (
('host', url.hostname),
('port', url.port),
('user', url.username),
('password', url.password),
('database', url.path[1:])) if v
}
for key, values in parse_qs(url.query, keep_blank_values=True).items():
# Try to get the last non-blank value in the list of values for each key
for i in reversed(range(len(values))):
value = values[i]
if value != '':
break
if value == '' and key != 'log_path':
# blank values are to be ignored
continue
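        # backup_server_node takes a list of hosts or (host, port) tuples,
        # which a flat query string cannot express, so it is ignored here.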
elif key == 'backup_server_node':
continue
elif key in ('connection_load_balance', 'use_prepared_statements',
'disable_copy_local', 'ssl', 'autocommit'):
lower = value.lower()
if lower in ('true', 'on', '1'):
result[key] = True
elif lower in ('false', 'off', '0'):
result[key] = False
elif key == 'connection_timeout':
result[key] = float(value)
elif key == 'log_level' and value.isdigit():
result[key] = int(value)
else:
result[key] = value
return result
_AddressEntry = namedtuple('_AddressEntry', ['host', 'resolved', 'data'])
class _AddressList(object):
def __init__(self, host, port, backup_nodes, logger):
"""Creates a new deque with the primary host first, followed by any backup hosts"""
self._logger = logger
# Items in address_deque are _AddressEntry values.
# host is the original hostname/ip, used by SSL option check_hostname
# - when resolved is False, data is port
# - when resolved is True, data is the 5-tuple from socket.getaddrinfo
        # This allows for lazy resolution. See peek() for more.
self.address_deque = deque()
# load primary host into address_deque
self._append(host, port)
# load backup nodes into address_deque
if not isinstance(backup_nodes, list):
err_msg = 'Connection option "backup_server_node" must be a list'
self._logger.error(err_msg)
raise TypeError(err_msg)
# Each item in backup_nodes should be either
# a host name or IP address string (using default port) or
# a (host, port) tuple
for node in backup_nodes:
if isinstance(node, string_types):
self._append(node, DEFAULT_PORT)
elif isinstance(node, tuple) and len(node) == 2:
self._append(node[0], node[1])
else:
err_msg = ('Each item of connection option "backup_server_node"'
' must be a host string or a (host, port) tuple')
self._logger.error(err_msg)
raise TypeError(err_msg)
self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
def _append(self, host, port):
if not isinstance(host, string_types):
err_msg = 'Host must be a string: invalid value: {0}'.format(host)
self._logger.error(err_msg)
raise TypeError(err_msg)
if not isinstance(port, (string_types, integer_types)):
err_msg = 'Port must be an integer or a string: invalid value: {0}'.format(port)
self._logger.error(err_msg)
raise TypeError(err_msg)
elif isinstance(port, string_types):
try:
port = int(port)
except ValueError as e:
                err_msg = 'Port "{0}" is not a valid integer: {1}'.format(port, e)
self._logger.error(err_msg)
raise ValueError(err_msg)
if port < 0 or port > 65535:
err_msg = 'Invalid port number: {0}'.format(port)
self._logger.error(err_msg)
raise ValueError(err_msg)
self.address_deque.append(_AddressEntry(host=host, resolved=False, data=port))
def push(self, host, port):
self.address_deque.appendleft(_AddressEntry(host=host, resolved=False, data=port))
def pop(self):
self.address_deque.popleft()
def peek(self):
# do lazy DNS resolution, returning the leftmost socket.getaddrinfo result
if len(self.address_deque) == 0:
return None
while len(self.address_deque) > 0:
self._logger.debug('Peek at address list: {0}'.format(list(self.address_deque)))
entry = self.address_deque[0]
if entry.resolved:
# return a resolved sockaddrinfo
return entry.data
else:
# DNS resolve a single host name to multiple IP addresses
self.pop()
# keep host and port info for adding address entry to deque once it has been resolved
host, port = entry.host, entry.data
try:
resolved_hosts = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
except Exception as e:
self._logger.warning('Error resolving host "{0}" on port {1}: {2}'.format(host, port, e))
continue
# add resolved addrinfo (AF_INET and AF_INET6 only) to deque
random.shuffle(resolved_hosts)
for addrinfo in resolved_hosts:
if addrinfo[0] in (socket.AF_INET, socket.AF_INET6):
self.address_deque.appendleft(_AddressEntry(
host=host, resolved=True, data=addrinfo))
return None
def peek_host(self):
# returning the leftmost host result
self._logger.debug('Peek host at address list: {0}'.format(list(self.address_deque)))
if len(self.address_deque) == 0:
return None
return self.address_deque[0].host
def _generate_session_label():
return '{type}-{version}-{id}'.format(
type='vertica-python',
version=vertica_python.__version__,
id=uuid.uuid1()
)
class Connection(object):
def __init__(self, options=None):
# type: (Optional[Dict[str, Any]]) -> None
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
options = options or {}
self.options = parse_dsn(options['dsn']) if 'dsn' in options else {}
self.options.update({key: value for key, value in options.items() \
if key == 'log_path' or (key != 'dsn' and value is not None)})
# Set up connection logger
logger_name = 'vertica_{0}_{1}'.format(id(self), str(uuid.uuid4())) # must be a unique value
self._logger = logging.getLogger(logger_name)
if 'log_level' not in self.options and 'log_path' not in self.options:
# logger is disabled by default
self._logger.disabled = True
else:
self.options.setdefault('log_level', DEFAULT_LOG_LEVEL)
self.options.setdefault('log_path', DEFAULT_LOG_PATH)
VerticaLogging.setup_logging(logger_name, self.options['log_path'],
self.options['log_level'], id(self))
self.options.setdefault('host', DEFAULT_HOST)
self.options.setdefault('port', DEFAULT_PORT)
if 'user' not in self.options:
if DEFAULT_USER:
self.options['user'] = DEFAULT_USER
else:
msg = 'Connection option "user" is required'
self._logger.error(msg)
raise KeyError(msg)
self.options.setdefault('database', self.options['user'])
self.options.setdefault('password', DEFAULT_PASSWORD)
self.options.setdefault('autocommit', DEFAULT_AUTOCOMMIT)
self.options.setdefault('session_label', _generate_session_label())
self.options.setdefault('backup_server_node', DEFAULT_BACKUP_SERVER_NODE)
self.options.setdefault('kerberos_service_name', DEFAULT_KRB_SERVICE_NAME)
# Kerberos authentication hostname defaults to the host value here so
# the correct value cannot be overwritten by load balancing or failover
self.options.setdefault('kerberos_host_name', self.options['host'])
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
# we only support one cursor per connection
self.options.setdefault('unicode_error', None)
self._cursor = Cursor(self, self._logger, cursor_type=None,
unicode_error=self.options['unicode_error'])
# knob for using server-side prepared statements
self.options.setdefault('use_prepared_statements', False)
self._logger.debug('Connection prepared statements is {}'.format(
'enabled' if self.options['use_prepared_statements'] else 'disabled'))
# knob for disabling COPY LOCAL operations
self.options.setdefault('disable_copy_local', False)
self._logger.debug('COPY LOCAL operation is {}'.format(
'disabled' if self.options['disable_copy_local'] else 'enabled'))
self._logger.info('Connecting as user "{}" to database "{}" on host "{}" with port {}'.format(
self.options['user'], self.options['database'],
self.options['host'], self.options['port']))
self.startup_connection()
# Initially, for a new session, autocommit is off
if self.options['autocommit']:
self.autocommit = True
self._logger.info('Connection is ready')
#############################################
# supporting `with` statements
#############################################
def __enter__(self):
# type: () -> Self
return self
def __exit__(self, type_, value, traceback):
self.close()
#############################################
# dbapi methods
#############################################
def close(self):
self._logger.info('Close the connection')
try:
self.write(messages.Terminate())
finally:
self.close_socket()
def commit(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('COMMIT;')
def rollback(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('ROLLBACK;')
def cursor(self, cursor_type=None):
# type: (Self, Optional[Union[Literal['list', 'dict'], Type[list[Any]], Type[dict[Any, Any]]]]) -> Cursor
if self.closed():
raise errors.ConnectionError('Connection is closed')
if self._cursor.closed():
self._cursor._closed = False
# let user change type if they want?
self._cursor.cursor_type = cursor_type
return self._cursor
#############################################
# non-dbapi methods
#############################################
@property
def autocommit(self):
"""Read the connection's AUTOCOMMIT setting from cache"""
return self.parameters.get('auto_commit', 'off') == 'on'
@autocommit.setter
def autocommit(self, value):
"""Change the connection's AUTOCOMMIT setting"""
if self.autocommit is value:
return
val = 'on' if value else 'off'
cur = self.cursor()
cur.execute('SET SESSION AUTOCOMMIT TO {}'.format(val), use_prepared_statements=False)
cur.fetchall() # check for errors and update the cache
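# Usage sketch (illustrative; `conn` is an open Connection):
#   conn.autocommit          # reads the cached 'auto_commit' session parameter
#   conn.autocommit = True   # issues "SET SESSION AUTOCOMMIT TO on" and refreshes the cache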
def cancel(self):
"""Cancel the current database operation. This can be called from a
different thread than the one currently executing a database operation.
"""
if self.closed():
raise errors.ConnectionError('Connection is closed')
self._logger.info('Canceling the current database operation')
# Must create a new socket connection to the server
temp_socket = self.establish_socket_connection(self.address_list)
self.write(CancelRequest(self.backend_pid, self.backend_key), temp_socket)
temp_socket.close()
self._logger.info('Cancel request issued')
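# Usage sketch (illustrative): cancel() may be issued from a second thread
# while the first thread is blocked inside cursor.execute():
#   import threading
#   threading.Thread(target=conn.cancel).start()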
def opened(self):
return (self.socket is not None
and self.backend_pid is not None
and self.transaction_status is not None)
def closed(self):
return not self.opened()
def __str__(self):
safe_options = {key: value for key, value in self.options.items() if key != 'password'}
s1 = "<Vertica.Connection:{0} parameters={1} backend_pid={2}, ".format(
id(self), self.parameters, self.backend_pid)
s2 = "backend_key={0}, transaction_status={1}, socket={2}, options={3}>".format(
self.backend_key, self.transaction_status, self.socket, safe_options)
return ''.join([s1, s2])
#############################################
# internal
#############################################
def reset_values(self):
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
def _socket(self):
if self.socket:
return self.socket
# the initial establishment of the client connection
raw_socket = self.establish_socket_connection(self.address_list)
# enable load balancing
load_balance_options = self.options.get('connection_load_balance')
self._logger.debug('Connection load balance option is {0}'.format(
'enabled' if load_balance_options else 'disabled'))
if load_balance_options:
raw_socket = self.balance_load(raw_socket)
# enable SSL
ssl_options = self.options.get('ssl')
self._logger.debug('SSL option is {0}'.format('enabled' if ssl_options else 'disabled'))
if ssl_options:
raw_socket = self.enable_ssl(raw_socket, ssl_options)
self.socket = raw_socket
return self.socket
def _socket_as_file(self):
if self.socket_as_file is None:
self.socket_as_file = self._socket().makefile('rb')
return self.socket_as_file
def create_socket(self, family):
"""Create a TCP socket object"""
raw_socket = socket.socket(family, socket.SOCK_STREAM)
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
connection_timeout = self.options.get('connection_timeout')
if connection_timeout is not None:
self._logger.debug('Set socket connection timeout: {0}'.format(connection_timeout))
raw_socket.settimeout(connection_timeout)
return raw_socket
def balance_load(self, raw_socket):
# Send load balance request and read server response
self._logger.debug('=> %s', messages.LoadBalanceRequest())
raw_socket.sendall(messages.LoadBalanceRequest().get_message())
response = raw_socket.recv(1)
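# The server answers b'Y' when it will redirect the client to another node;
# any other byte means load balancing is not supported.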
if response == b'Y':
size = unpack('!I', raw_socket.recv(4))[0]
if size < 4:
err_msg = "Bad message size: {0}".format(size)
self._logger.error(err_msg)
raise errors.MessageError(err_msg)
res = BackendMessage.from_type(type_=response, data=raw_socket.recv(size - 4))
self._logger.debug('<= %s', res)
host = res.get_host()
port = res.get_port()
self._logger.info('Load balancing to host "{0}" on port {1}'.format(host, port))
peer = raw_socket.getpeername()
socket_host, socket_port = peer[0], peer[1]
if host == socket_host and port == socket_port:
self._logger.info('Already connecting to host "{0}" on port {1}. Ignore load balancing.'.format(host, port))
return raw_socket
# Push the new host onto the address list before connecting again. Note that this
# will leave the originally-specified host as the first failover possibility.
self.address_list.push(host, port)
raw_socket.close()
raw_socket = self.establish_socket_connection(self.address_list)
else:
self._logger.debug('<= LoadBalanceResponse: %s', response)
self._logger.warning("Load balancing requested but not supported by server")
return raw_socket
def enable_ssl(self, raw_socket, ssl_options):
# Send SSL request and read server response
self._logger.debug('=> %s', messages.SslRequest())
raw_socket.sendall(messages.SslRequest().get_message())
response = raw_socket.recv(1)
self._logger.debug('<= SslResponse: %s', response)
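# b'S' means the server accepts the SSL handshake; any other byte means SSL
# is unsupported and SSLNotSupported is raised below.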
if response == b'S':
self._logger.info('Enabling SSL')
try:
if isinstance(ssl_options, ssl.SSLContext):
server_host = self.address_list.peek_host()
if server_host is None: # This should not happen
msg = 'Cannot get the connected server host while enabling SSL'
self._logger.error(msg)
raise errors.ConnectionError(msg)
raw_socket = ssl_options.wrap_socket(raw_socket, server_hostname=server_host)
else:
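# Note: ssl.wrap_socket() is deprecated (and removed in Python 3.12);
# passing an ssl.SSLContext as the 'ssl' option is the preferred path above.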
raw_socket = ssl.wrap_socket(raw_socket)
except ssl.CertificateError as e:
raise_from(errors.ConnectionError(str(e)), e)
except ssl.SSLError as e:
raise_from(errors.ConnectionError(str(e)), e)
else:
err_msg = "SSL requested but not supported by server"
self._logger.error(err_msg)
raise errors.SSLNotSupported(err_msg)
return raw_socket
def establish_socket_connection(self, address_list):
"""Given a list of database node addresses, establish the socket
connection to the database server. Return a connected socket object.
"""
addrinfo = address_list.peek()
raw_socket = None
last_exception = None
# Failover: loop to try all addresses
while addrinfo:
(family, socktype, proto, canonname, sockaddr) = addrinfo
last_exception = None
# _AddressList filters all addrs to AF_INET and AF_INET6, which both
# have host and port as values 0, 1 in the sockaddr tuple.
host = sockaddr[0]
port = sockaddr[1]
self._logger.info('Establishing connection to host "{0}" on port {1}'.format(host, port))
try:
raw_socket = self.create_socket(family)
raw_socket.connect(sockaddr)
break
except Exception as e:
self._logger.info('Failed to connect to host "{0}" on port {1}: {2}'.format(host, port, e))
last_exception = e
address_list.pop()
addrinfo = address_list.peek()
raw_socket.close()
# all of the addresses failed
if raw_socket is None or last_exception:
err_msg = 'Failed to establish a connection to the primary server or any backup address.'
self._logger.error(err_msg)
raise errors.ConnectionError(err_msg)
return raw_socket
def ssl(self):
return self.socket is not None and isinstance(self.socket, ssl.SSLSocket)
def write(self, message, vsocket=None):
if not isinstance(message, FrontendMessage):
raise TypeError("invalid message: ({0})".format(message))
if vsocket is None:
vsocket = self._socket()
self._logger.debug('=> %s', message)
try:
for data in message.fetch_message():
size = 8192 # Max msg size, consistent with how the server works
pos = 0
while pos < len(data):
sent = vsocket.send(data[pos : pos + size])
if sent == 0:
raise errors.ConnectionError("Couldn't send message: Socket connection broken")
pos += sent
except Exception as e:
self.close_socket()
self._logger.error(str(e))
if isinstance(e, IOError):
raise_from(errors.ConnectionError(str(e)), e)
else:
raise
def close_socket(self):
try:
if self.socket is not None:
self._socket().close()
if self.socket_as_file is not None:
self._socket_as_file().close()
finally:
self.reset_values()
def reset_connection(self):
self.close()
self.startup_connection()
def is_asynchronous_message(self, message):
# Check if it is an asynchronous response message
# Note: ErrorResponse is a subclass of NoticeResponse
return (isinstance(message, messages.ParameterStatus) or
(isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)))
def handle_asynchronous_message(self, message):
if isinstance(message, messages.ParameterStatus):
if message.name == 'protocol_version':
message.value = int(message.value)
self.parameters[message.name] = message.value
elif (isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)):
if getattr(self, 'notice_handler', None) is not None:
self.notice_handler(message)
else:
self._logger.warning(message.error_message())
def read_string(self):
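# Read bytes up to a NUL terminator; the terminator is consumed but not returned.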
s = bytearray()
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
s.extend(char)
return s
def read_message(self):
while True:
try:
type_ = self.read_bytes(1)
size = unpack('!I', self.read_bytes(4))[0]
if size < 4:
raise errors.MessageError("Bad message size: {0}".format(size))
if type_ == messages.WriteFile.message_id:
# The whole WriteFile message may not be read here. Instead, only the
# file name and file length are read, because the message could be too
# large to read all at once.
f = self.read_string()
filename = f.decode('utf-8')
file_length = unpack('!I', self.read_bytes(4))[0]
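# Account for the bytes already consumed: the 4-byte size field, the
# filename bytes, its NUL terminator, and the 4-byte file length.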
size -= 4 + len(f) + 1 + 4
if size != file_length:
raise errors.MessageError("Bad message size: {0}".format(size))
if filename == '':
# If there is no filename, then this is really RETURNREJECTED data, not a rejected file
if file_length % 8 != 0:
raise errors.MessageError("Bad RETURNREJECTED data size: {0}".format(file_length))
data = self.read_bytes(file_length)
message = messages.WriteFile(filename, file_length, data)
else:
# The rest of the message is read later with write_to_disk()
message = messages.WriteFile(filename, file_length)
else:
message = BackendMessage.from_type(type_, self.read_bytes(size - 4))
self._logger.debug('<= %s', message)
self.handle_asynchronous_message(message)
# handle transaction status
if isinstance(message, messages.ReadyForQuery):
self.transaction_status = message.transaction_status
except (SystemError, IOError) as e:
self.close_socket()
# noinspection PyTypeChecker
self._logger.error(e)
raise_from(errors.ConnectionError(str(e)), e)
if not self.is_asynchronous_message(message):
break
return message
def read_expected_message(self, expected_types, error_handler=None):
# Reads a message and does some basic error handling.
# expected_types must be a class (e.g. messages.BindComplete) or a tuple of classes
message = self.read_message()
if isinstance(message, expected_types):
return message
elif isinstance(message, messages.ErrorResponse):
if error_handler is not None:
error_handler(message)
else:
raise errors.DatabaseError(message.error_message())
else:
msg = 'Received unexpected message type: {}. '.format(type(message).__name__)
if isinstance(expected_types, tuple):
msg += 'Expected types: {}'.format(", ".join([t.__name__ for t in expected_types]))
else:
msg += 'Expected type: {}'.format(expected_types.__name__)
self._logger.error(msg)
raise errors.MessageError(msg)
def read_bytes(self, n):
if n == 1:
result = self._socket_as_file().read(1)
if not result:
raise errors.ConnectionError("Connection closed by Vertica")
return result
else:
buf = b""
to_read = n
while to_read > 0:
data = self._socket_as_file().read(to_read)
received = len(data)
if received == 0:
raise errors.ConnectionError("Connection closed by Vertica")
buf += data
to_read -= received
return buf
def send_GSS_response_and_receive_challenge(self, response):
# Send the GSS response data to the vertica server
token = base64.b64decode(response)
self.write(messages.Password(token, messages.Authentication.GSS))
# Receive the challenge from the vertica server
message = self.read_expected_message(messages.Authentication)
if message.code != messages.Authentication.GSS_CONTINUE:
msg = ('Received unexpected message type: Authentication(type={}).'
' Expected type: Authentication(type={})'.format(
message.code, messages.Authentication.GSS_CONTINUE))
self._logger.error(msg)
raise errors.MessageError(msg)
return message.auth_data
def make_GSS_authentication(self):
try:
import kerberos
except ImportError as e:
raise errors.ConnectionError("{}\nCannot make a Kerberos "
"authentication because no Kerberos package is installed. "
"Get it with 'pip install kerberos'.".format(str(e)))
# Set GSS flags
gssflag = (kerberos.GSS_C_DELEG_FLAG | kerberos.GSS_C_MUTUAL_FLAG |
kerberos.GSS_C_SEQUENCE_FLAG | kerberos.GSS_C_REPLAY_FLAG)
# Generate the GSS-style service principal name
service_principal = "{}@{}".format(self.options['kerberos_service_name'],
self.options['kerberos_host_name'])
# Initializes a context object with a service principal
self._logger.info('Initializing a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
try:
result, context = kerberos.authGSSClientInit(service_principal, gssflags=gssflag)
except kerberos.GSSError as err:
msg = "GSSAPI initialization error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
if result != kerberos.AUTH_GSS_COMPLETE:
msg = ('Failed to initialize a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
self._logger.error(msg)
raise errors.KerberosError(msg)
# Processes GSSAPI client-side steps
try:
challenge = b''
while True:
self._logger.info('Processing a single GSSAPI client-side step')
challenge = base64.b64encode(challenge).decode("utf-8")
result = kerberos.authGSSClientStep(context, challenge)
if result == kerberos.AUTH_GSS_COMPLETE:
self._logger.info('Result: GSSAPI step complete')
break
elif result == kerberos.AUTH_GSS_CONTINUE:
self._logger.info('Result: GSSAPI step continuation')
# Get the response from the last successful GSSAPI client-side step
response = kerberos.authGSSClientResponse(context)
challenge = self.send_GSS_response_and_receive_challenge(response)
else:
msg = "GSSAPI client-side step error status {}".format(result)
self._logger.error(msg)
raise errors.KerberosError(msg)
except kerberos.GSSError as err:
msg = "GSSAPI client-side step error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
def startup_connection(self):
user = self.options['user']
database = self.options['database']
session_label = self.options['session_label']
os_user_name = DEFAULT_USER if DEFAULT_USER else ''
password = self.options['password']
self.write(messages.Startup(user, database, session_label, os_user_name))
while True:
message = self.read_message()
if isinstance(message, messages.Authentication):
if message.code == messages.Authentication.OK:
self._logger.info("User {} successfully authenticated"
.format(self.options['user']))
elif message.code == messages.Authentication.CHANGE_PASSWORD:
msg = "The password for user {} has expired".format(self.options['user'])
self._logger.error(msg)
raise errors.ConnectionError(msg)
elif message.code == messages.Authentication.PASSWORD_GRACE:
self._logger.warning('The password for user {} will expire soon.'
' Please consider changing it.'.format(self.options['user']))
elif message.code == messages.Authentication.GSS:
self.make_GSS_authentication()
else:
self.write(messages.Password(password, message.code,
{'user': user,
'salt': getattr(message, 'salt', None),
'usersalt': getattr(message, 'usersalt', None)}))
elif isinstance(message, messages.BackendKeyData):
self.backend_pid = message.pid
self.backend_key = message.key
elif isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.ErrorResponse):
self._logger.error(message.error_message())
raise errors.ConnectionError(message.error_message())
else:
msg = "Received unexpected startup message: {0}".format(message)
self._logger.error(msg)
raise errors.MessageError(msg)
|
block_group
|
Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convolutions. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
"""Get drop connect rate for the ith block."""
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
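# Example: init_rate=0.2, total_blocks=5 -> block 2 gets 0.2 * 2 / 5 = 0.08, block 5 gets 0.2.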
# MASKED: block_group function (lines 42-103)
class Resnet(object):
"""Class to build ResNet family model."""
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
"""ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer (optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
"""
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth: %s' % (
valid_resnet_depths, self._resnet_depth))
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
def __call__(self, inputs, is_training=False):
"""Returns the ResNet model for a given size and number of output classes.
Args:
inputs: a `Tensor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
"""
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training=False):
"""Creation of the model graph."""
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
|
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convolutions. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
| 42
| 103
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
"""Get drop connect rate for the ith block."""
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
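# Example: init_rate=0.2, total_blocks=5 -> block 2 gets 0.2 * 2 / 5 = 0.08, block 5 gets 0.2.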
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convolutions. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
class Resnet(object):
"""Class to build ResNet family model."""
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
"""ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer (optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
"""
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth: %s' % (
valid_resnet_depths, self._resnet_depth))
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
def __call__(self, inputs, is_training=False):
"""Returns the ResNet model for a given size and number of output classes.
Args:
inputs: a `Tensor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
"""
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training=False):
"""Creation of the model graph."""
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
|
__init__
|
ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer (optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
"""Get drop connect rate for the ith block."""
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
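# Example: init_rate=0.2, total_blocks=5 -> block 2 gets 0.2 * 2 / 5 = 0.08, block 5 gets 0.2.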
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convolutions. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
class Resnet(object):
"""Class to build ResNet family model."""
# MASKED: __init__ function (lines 109-154)
def __call__(self, inputs, is_training=False):
"""Returns the ResNet model for a given size and number of output classes.
Args:
inputs: a `Tensor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
"""
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training=False):
"""Creation of the model graph."""
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
|
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
"""ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer (optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
"""
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth: %s' % (
valid_resnet_depths, self._resnet_depth))
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
| 109
| 154
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
"""Get drop connect rate for the ith block."""
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
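# Example: init_rate=0.2, total_blocks=5 -> block 2 gets 0.2 * 2 / 5 = 0.08, block 5 gets 0.2.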
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convolutions. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
class Resnet(object):
"""Class to build ResNet family model."""
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
"""ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer (optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
"""
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth: %s' % (
valid_resnet_depths, self._resnet_depth))
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
def __call__(self, inputs, is_training=False):
"""Returns the ResNet model for a given size and number of output classes.
Args:
inputs: a `Tensor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
"""
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training=False):
"""Creation of the model graph."""
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
|
_get_dist_params
|
Returns the learned parameters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
# boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
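# Resulting trunk for hidden_dimensions=[64, 32] with layernorm=False (illustration):
#   Linear(state_dim, 64) -> activation -> Linear(64, 32) -> activation
# The value head then maps the last hidden layer to a scalar: Linear(32, 1).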
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
    """Policy class for discrete action spaces.
    Instantiates the linear layers, a value head and a categorical distribution head
    over a finite set of actions. It also defines some interface functions.
    Parameters
    ----------
    representation_dim : int
        Dimensions of the input representation.
    action_dim : int
        Number of dimensions for the action space.
    num_actions : int
        Number of available discrete actions.
    hidden_dimensions : List[int]
        Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
    nonlinearity : str
        Nonlinearity used between hidden layers. Options are:
        - "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU
        - "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU
        - "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6
        - "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU
        - "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU
        - "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish
    layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
        This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for details.
    """
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
    def _get_dist_params(
        self, x: torch.Tensor
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Returns the learned parameters of the distribution.
        Parameters
        ----------
        x : torch.FloatTensor
            Input state tensor.
        Returns
        -------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Logits for the categorical action distribution (pi_logits),
            State value estimate (V_hat).
        """
        x = self.trunk(x)
        V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # holding the logits of the categorical action distribution
        pi_logits = self.dist_head(x)
        return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
        # samples from dist have shape [batch_size]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
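        # e.g. pi_logits [batch_size, K] is repeated to [batch_size, num_actions, K],
        # so that pi_hat.log_prob(actions) returns shape [batch_size, num_actions]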
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
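        # e.g. with the make_policy defaults log_param_min=-5 and log_param_max=2,
        # sigma is constrained to [exp(-5) ~ 0.0067, exp(2) ~ 7.39]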
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
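        # single-sample Monte Carlo estimate of the entropy E[-log pi(a|s)]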
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
        # calculate the number of parameters needed for the GMM:
        # each component needs 2 parameters (mean and log std) per action dimension,
        # plus one mixture logit per component
        dist_params = num_components * (2 * self.action_dim + 1)
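        # e.g. num_components=3 and action_dim=2 -> 3 * (2 * 2 + 1) = 15 head outputs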
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
        # the first 2*action_dim*num_components elements are the parameters of the mixture components
        # the last num_components elements are the logits of the mixture coefficients
        mixture_params = self.dist_head(x)
        # select the component parameters and flatten them to [batch_size, 2*num_components*action_dim]
        dist_params = mixture_params[
            ..., : self.num_components * 2 * self.action_dim
        ].view(x.shape[0], -1)
        # take the last num_components elements as logits for the mixture coefficients
        log_coeff = mixture_params[..., -self.num_components :]
        # split dist_params along the last dimension into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
    This is achieved via a location-scale transformation (2c)x - c, where c is the desired bound.
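    For example, with bound c = 2 a Beta sample x in [0, 1] is mapped to 4x - 2 in [-2, 2].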
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
    action_bound : float
        Bound c for the action space. Must be specified for the Beta policy.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
        # sample on the transformed support [-c, c] so inference matches get_train_data
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
    The following arguments need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
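
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative addition, not part of the original module).
# It builds a bounded DiagonalNormalPolicy through make_policy and queries it;
# all dimensions below are hypothetical placeholders, and SquashedNormal is
# assumed to behave as in alphazero.network.distributions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    policy = make_policy(
        representation_dim=8,
        action_dim=2,
        distribution="normal",
        hidden_dimensions=[64, 64],
        nonlinearity="relu",
        num_components=1,
        action_bound=1.0,
    )
    states = torch.randn(4, 8)
    actions = policy.sample_action(states)  # np.ndarray, shape [4, 2]
    values = policy.predict_V(states)       # np.ndarray, shape [4, 1]
    print(policy)
    print(actions.shape, values.shape)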
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Action logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # containing the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
# MASKED: forward function (lines 299-317)
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
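    # Shape note (editor's addition, assuming batch_size=4, three sampled root
    # actions and A environment actions): pi_logits [4, A] is repeated to
    # [4, 3, A], so pi_hat has batch_shape [4, 3] and log_probs/entropy share
    # the [4, 3] layout of the actions tensor.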
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
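    # Worked example of the clamp above (editor's addition, using the
    # make_policy defaults log_param_min=-5, log_param_max=2): a raw head
    # output of 3.7 is clamped to 2.0, so sigma = exp(2.0) ~ 7.39; an output
    # of -9.0 is clamped to -5.0, so sigma = exp(-5.0) ~ 0.0067.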
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
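# Editor's sketch (not part of the original module): what "factorized" means
# for DiagonalNormalPolicy. The joint log-probability of an action is the sum
# of independent per-dimension 1D Normal log-probabilities; the shapes below
# are illustrative assumptions.
def _demo_factorized_normal() -> None:
    mu = torch.zeros(2, 3)      # batch of 2 states, 3 action dimensions
    sigma = torch.ones(2, 3)
    action = torch.randn(2, 3)
    per_dim = D.Normal(mu, sigma).log_prob(action)  # shape [2, 3]
    joint = per_dim.sum(dim=-1)                     # factorized joint log-prob
    ref = D.Independent(D.Normal(mu, sigma), 1).log_prob(action)
    assert torch.allclose(joint, ref)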
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
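        # Worked count (editor's addition): with action_dim=2 and
        # num_components=3, dist_params = 3 * (2*2 + 1) = 15 head outputs:
        # 12 component means/log-stddevs plus 3 mixture logits.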
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # take the first 2*action_dim*num_components elements as the flattened component parameters, shape [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
        # split the dist_params along the last dimension into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
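# Editor's sketch (not original code) of the MixtureSameFamily construction
# used by DiagonalGMMPolicy: a batch of 2 GMMs with 3 components each over a
# 1D action. All values are illustrative assumptions.
def _demo_gmm() -> None:
    log_coeff = torch.zeros(2, 3)   # uniform mixture logits
    mu = torch.randn(2, 3)
    sigma = torch.ones(2, 3)
    mix = D.Categorical(logits=log_coeff)
    gmm = D.MixtureSameFamily(mix, D.Normal(mu, sigma))
    sample = gmm.sample()            # shape [2]: one action per batch entry
    log_prob = gmm.log_prob(sample)  # shape [2]
    assert sample.shape == log_prob.shape == torch.Size([2])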
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
        # sample on the transformed support (-c, c), consistent with get_train_data
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
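# Editor's sketch (not original code) of the location-scale transform
# described in the class docstring: Beta samples on (0, 1) are mapped to
# (-c, c) via (2c)x - c. The bound c and the alpha/beta values below are
# illustrative assumptions.
def _demo_beta_support(c: float = 2.0) -> None:
    base = D.Beta(torch.tensor([1.5]), torch.tensor([2.5]))
    x = base.sample((1000,))    # samples on (0, 1)
    y = 2.0 * c * x - c         # transformed samples on (-c, c)
    assert bool(((-c < y) & (y < c)).all())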
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
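# Editor's usage sketch for make_policy; the dimensions and hyperparameters
# below are illustrative assumptions, not values from any original config.
if __name__ == "__main__":
    policy = make_policy(
        representation_dim=8,
        action_dim=2,
        distribution="normal",
        hidden_dimensions=[64, 64],
        nonlinearity="relu",
        num_components=1,     # selects DiagonalNormalPolicy
        action_bound=1.0,     # squashes samples into (-1, 1)
    )
    state = torch.randn(4, 8)           # batch of 4 states
    print(policy.sample_action(state))  # ndarray of shape (4, 2)
    print(policy.predict_V(state))      # ndarray of shape (4, 1)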
|
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
        # samples from dist have shape [batch_size]
return dist, V_hat
| 299
| 317
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Action logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # containing the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
        # samples from dist have shape [batch_size]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # take the first 2*action_dim*num_components elements as the flattened component parameters, shape [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
        # split the dist_params along the last dimension into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
        # sample on the transformed support (-c, c), consistent with get_train_data
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
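# Editor's usage sketch for the discrete branch of make_policy; the dimensions
# and the action count below are illustrative assumptions.
if __name__ == "__main__":
    policy = make_policy(
        representation_dim=8,
        action_dim=1,
        distribution="discrete",
        hidden_dimensions=[32],
        nonlinearity="elu",
        num_actions=5,
    )
    state = torch.randn(4, 8)
    print(policy.predict_pi(state))  # ndarray of shape (4, 5); rows sum to 1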
|
forward
|
Returns the learned parameters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
    num_actions : int
        Number of discrete actions available to the agent.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Categorical action logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # holding the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
        # samples from dist have shape [batch_size]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
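        # e.g. with batch_size=32 and 4 sampled root actions, pi_hat has
        # batch_shape [32, 4], so log_probs and entropy below are both [32, 4]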
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
# MASKED: forward function (lines 436-464)
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # flatten the component parameters to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
        # split dist_params along the last dimension into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
        # sample from the transformed Beta so actions lie in [-action_bound, action_bound]
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound is not None, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
|
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
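# Shape sketch (illustration only, not part of the extracted implementation):
# for a batch of 32 states and action_dim=1, mu, sigma and V_hat each have
# shape [32, 1]; sigma is strictly positive because it is exp() of the clamped log_std.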
| 436
| 464
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
    num_actions : int
        Number of discrete actions available to the agent.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Categorical action logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # holding the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
        # samples from dist have shape [batch_size]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # flatten the component parameters to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
        # split dist_params along the last dimension into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
        # sample from the transformed Beta so actions lie in [-action_bound, action_bound]
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound is not None, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
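# Illustrative usage sketch for make_policy (hypothetical sizes, not part of
# the library API). num_components=1 is needed to reach the
# DiagonalNormalPolicy branch above.
def _make_policy_usage_sketch() -> None:
    policy = make_policy(
        representation_dim=8,
        action_dim=1,
        distribution="normal",
        hidden_dimensions=[64, 64],
        nonlinearity="relu",
        num_components=1,
        action_bound=1.0,
    )
    states = torch.randn(4, 8)
    actions = policy.sample_action(states)  # numpy array, shape [4, 1]
    values = policy.predict_V(states)       # numpy array, shape [4, 1]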
|
forward
|
Returns the learned parameters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log parameters.
log_param_max : float
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
# boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
# subclasses store their distribution name in the policy_type class variable
return (
f"class={type(self).__name__}, distribution={getattr(self, 'policy_type', 'unknown')}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds=[{self.log_param_min}, {self.log_param_max}], hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
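# Construction sketch: for hidden_dimensions=[64, 32], layernorm=True and
# nonlinearity="relu" (hypothetical values), the loop in __init__ above builds
# a trunk equivalent to
#   nn.Sequential(
#       nn.Linear(state_dim, 64), nn.ReLU(inplace=True), nn.LayerNorm(64),
#       nn.Linear(64, 32), nn.ReLU(inplace=True), nn.LayerNorm(32),
#   )
# with a scalar value head nn.Linear(32, 1) on top.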
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor]
Categorical logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns the categorical logits with shape [batch_size, num_actions]
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[D.Categorical, torch.FloatTensor]
Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
# samples from dist have shape [batch_size]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
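# Shape sketch for the repeat trick in get_train_data above (hypothetical
# sizes; relies on the module-level torch and torch.distributions imports):
def _categorical_shape_sketch() -> None:
    logits = torch.randn(2, 5)             # [batch_size, num_actions_available]
    actions = torch.randint(0, 5, (2, 3))  # [batch_size, num_sampled_actions]
    pi_hat = D.Categorical(logits=logits.unsqueeze(dim=1).repeat((1, 3, 1)))
    # batch_shape is now [2, 3], so log_prob aligns with the actions tensor
    assert pi_hat.log_prob(actions).shape == (2, 3)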
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log standard deviation.
log_param_max : float
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
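# Numeric sketch of the clamp-then-exp trick used in forward above: with the
# assumed defaults log_param_min=-5 and log_param_max=2, sigma is confined to
# [exp(-5), exp(2)] ~= [0.0067, 7.389], which keeps the Normal numerically stable.
def _log_std_bounds_sketch(log_std: torch.Tensor) -> torch.Tensor:
    return torch.clamp(log_std, min=-5.0, max=2.0).exp()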
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log standard deviations.
log_param_max : float
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
# 2 because each component is specified by 2 parameters (mean and log std),
# plus one mixture logit per component
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
# MASKED: forward function (lines 590-631)
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log_alpha and log_beta.
log_param_max : float
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
# sample from the transformed Beta so actions lie in [-action_bound, action_bound]
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
# the beta policy requires action bounds rather than mixture components
assert action_bound, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
|
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
# select the component parameters and flatten to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
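# Shape sketch for the head above (hypothetical sizes): with batch_size=4,
# action_dim=2 and num_components=3, dist_head emits 3 * (2*2 + 1) = 15
# features per state. The first 12 split into mu and log_std of shape [4, 6]
# each (3 components per action dimension); the trailing 3 are the mixture
# logits log_coeff of shape [4, 3].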
| 590
| 631
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log parameters.
log_param_max : float
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
# boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
# subclasses store their distribution name in the policy_type class variable
return (
f"class={type(self).__name__}, distribution={getattr(self, 'policy_type', 'unknown')}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds=[{self.log_param_min}, {self.log_param_max}], hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor]
Categorical logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns the categorical logits with shape [batch_size, num_actions]
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[D.Categorical, torch.FloatTensor]
Categorical distribution over actions (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
# samples from dist have shape [batch_size]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log standard deviation.
log_param_max : float
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
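# Note on the entropy term in get_train_data above: -log_probs.mean(dim=-1) is
# a Monte-Carlo estimate of E[-log p(a)] over the sampled root actions, not the
# closed-form Normal entropy. A closed-form alternative (sketch, unbounded case
# only, since the squashed distribution has no analytic entropy) would be
# normal.entropy().sum(dim=-1).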
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log standard deviations.
log_param_max : float
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
# 2 because each component is specified by 2 parameters (mean and log std),
# plus one mixture logit per component
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
# select the component parameters and flatten to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : float
Lower bound for learned log_alpha and log_beta.
log_param_max : float
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
# sample from the transformed Beta so actions lie in [-action_bound, action_bound]
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
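# Location-scale sketch for the generalized support (hypothetical bound c=2.0):
# a standard Beta sample u in [0, 1] maps to (2*c)*u - c in [-c, c], matching
# the transformation described in the class docstring above.
def _beta_support_sketch(alpha: torch.Tensor, beta: torch.Tensor, c: float = 2.0) -> torch.Tensor:
    u = D.Beta(alpha, beta).sample()  # support [0, 1]
    return 2.0 * c * u - c            # support [-c, c]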
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
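# Minimal usage sketch (argument values are assumptions, not from the original
# source): build a bounded diagonal normal policy via make_policy and sample a
# batch of actions.
if __name__ == "__main__":
    example_policy = make_policy(
        representation_dim=8,
        action_dim=2,
        distribution="normal",
        hidden_dimensions=[64, 64],
        nonlinearity="relu",
        num_components=1,  # the normal branch dispatches on num_components
        action_bound=1.0,
    )
    example_states = torch.randn(4, 8)
    print(example_policy.sample_action(example_states).shape)  # expected (4, 2)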
|
forward
|
Returns the learned parameters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
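# Sketch of the trunk built by the constructor above for
# hidden_dimensions=[64, 32] with layernorm=True (illustrative only, sizes are
# assumptions):
#   Linear(state_dim, 64) -> ReLU -> LayerNorm(64)
#   -> Linear(64, 32) -> ReLU -> LayerNorm(32)
# followed by value_head = nn.Linear(32, 1). A standalone equivalent:
if __name__ == "__main__":
    _example_trunk = nn.Sequential(
        nn.Linear(8, 64), nn.ReLU(inplace=True), nn.LayerNorm(64),
        nn.Linear(64, 32), nn.ReLU(inplace=True), nn.LayerNorm(32),
    )
    print(_example_trunk(torch.randn(4, 8)).shape)  # expected (4, 32)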
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Categorical logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # containing the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical action distribution (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
# samples from dist have shape [batch_size, action_dim]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
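# Standalone sketch of the logits-repeat trick in get_train_data above (sizes
# are assumptions): repeating the logits along a new dimension yields a
# Categorical with batch_shape [batch_size, num_actions], so log_prob returns
# one value per sampled root action.
if __name__ == "__main__":
    _bs, _num_actions, _n = 4, 3, 5
    _logits = torch.randn(_bs, _n)
    _actions = torch.randint(0, _n, (_bs, _num_actions))
    _pi = D.Categorical(logits=_logits.unsqueeze(dim=1).repeat((1, _num_actions, 1)))
    print(_pi.log_prob(_actions).shape)  # expected torch.Size([4, 3])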
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
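# Numeric sketch of the clamped log-std trick in forward above (the bounds used
# here are the assumed defaults): clamping log_std to [-5, 2] bounds sigma to
# roughly [exp(-5), exp(2)] ~ [0.0067, 7.389], keeping the Normal well conditioned.
if __name__ == "__main__":
    _log_std = torch.tensor([-20.0, 0.0, 20.0])
    print(torch.clamp(_log_std, min=-5.0, max=2.0).exp())
    # expected roughly tensor([0.0067, 1.0000, 7.3891])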
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # slice out the component parameters and flatten to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
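# Head-size arithmetic for dist_head above (sketch; sizes are assumptions): with
# action_dim=2 and num_components=3 the head emits 3 * (2*2 + 1) = 15 values per
# state, 12 component means/log-stds plus 3 mixture logits. The slicing mirrors
# forward:
if __name__ == "__main__":
    _ad, _nc, _b = 2, 3, 4
    _mixture_params = torch.randn(_b, _nc * (2 * _ad + 1))
    _dist_params = _mixture_params[..., : _nc * 2 * _ad].view(_b, -1)
    _log_coeff = _mixture_params[..., -_nc:]
    _mu, _log_std = _dist_params.chunk(2, dim=-1)
    print(_mu.shape, _log_std.shape, _log_coeff.shape)  # (4, 6) (4, 6) (4, 3)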
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
# MASKED: forward function (lines 750-778)
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
        # sample from the bounded GeneralizedBeta so actions land in (-c, c)
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
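# Range sketch for the clamp-then-exp head in forward above (bounds are the
# assumed defaults): with log parameters clamped to [-5, 2], alpha and beta land
# in roughly [0.0067, 7.389], which avoids degenerate Beta concentrations.
if __name__ == "__main__":
    _raw = torch.randn(6) * 10.0
    _ab = torch.clamp(_raw, min=-5.0, max=2.0).exp()
    print(_ab.min().item(), _ab.max().item())  # both within [0.0067, 7.389]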
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound, "Beta policy needs action bounds specified."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
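# Dispatch summary for make_policy above (sketch; argument values are
# assumptions):
#   distribution="discrete" -> DiscretePolicy (requires num_actions)
#   distribution="beta"     -> GeneralizedBetaPolicy (requires action_bound)
#   otherwise               -> DiagonalGMMPolicy if num_components > 1,
#                              else DiagonalNormalPolicy
if __name__ == "__main__":
    _policy = make_policy(
        representation_dim=8,
        action_dim=1,
        distribution="discrete",
        hidden_dimensions=[32],
        nonlinearity="elu",
        num_actions=4,
    )
    print(type(_policy).__name__)  # expected DiscretePolicy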
|
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
| 750
| 778
|
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
    The base policy is responsible for instantiating the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Categorical logits (pi_logits), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, num_actions]
        # containing the logits of the categorical action distribution
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical action distribution (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
# samples from dist have shape [batch_size, action_dim]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
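# Sketch of what predict_pi above returns (sizes are assumptions): the softmax
# of the categorical logits, i.e. one probability vector per state.
if __name__ == "__main__":
    _logits = torch.randn(2, 4)
    _probs = F.softmax(_logits, dim=-1)
    print(_probs.sum(dim=-1))  # each row sums to 1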
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # 2 comes from each distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # slice out the component parameters and flatten to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
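# Note on the entropy term in get_train_data above: a Gaussian mixture has no
# closed-form entropy, so -log_probs.mean(dim=-1) is a Monte Carlo estimate over
# the sampled root actions. Standalone shape sketch (sizes are assumptions):
if __name__ == "__main__":
    _lp = torch.randn(4, 3)            # [batch_size, num_actions]
    print((-_lp.mean(dim=-1)).shape)   # expected torch.Size([4])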
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
# ensure that the distribution batch_shape fits the number of actions taken for
# each agent at the root
num_actions = actions.shape[-1]
alpha = alpha.expand(-1, num_actions)
beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
alpha, beta, _ = self(x)
# Sample from the bounded distribution so actions lie in [-c, c],
# consistent with get_train_data (plain D.Beta samples in [0, 1])
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
# the Beta policy requires action bounds, not mixture components
assert action_bound
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
|
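A minimal, self-contained sketch (not part of the source file; shapes and values are illustrative assumptions) of the two distribution tricks used by the policy classes above: repeating per-state mixture parameters along a new actions axis so one MixtureSameFamily scores several actions per state, and obtaining the [-c, c] Beta support via the affine transform (2c)x - c.

import torch
import torch.distributions as D

batch, num_components, num_actions = 4, 3, 5
mu = torch.randn(batch, num_components)
sigma = torch.rand(batch, num_components) + 0.1
log_coeff = torch.randn(batch, num_components)

# Repeat the mixture parameters once per action slot:
# (batch, num_components) -> (batch, num_actions, num_components)
mu_e = mu.unsqueeze(1).expand(-1, num_actions, -1)
sigma_e = sigma.unsqueeze(1).expand(-1, num_actions, -1)
log_coeff_e = log_coeff.unsqueeze(1).expand(-1, num_actions, -1)

gmm = D.MixtureSameFamily(D.Categorical(logits=log_coeff_e),
                          D.Normal(mu_e, sigma_e))
actions = torch.randn(batch, num_actions)  # one scalar action per slot
print(gmm.log_prob(actions).shape)         # torch.Size([4, 5])

# The [-c, c] support of the generalized Beta corresponds to the
# transform (2c)x - c applied to a standard Beta sample:
c = 2.0
gbeta = D.TransformedDistribution(
    D.Beta(torch.tensor(2.0), torch.tensor(3.0)),
    D.AffineTransform(loc=-c, scale=2 * c))
print(gbeta.sample())                      # a value in (-2.0, 2.0)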
push
|
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
|
"""
Min Stack
-----
A LIFO abstract data type that serves as a collection of elements.
Supports retrieving the min from the stack in constant time.
"""
class MinStack(object):
def __init__(self):
"""
Attributes:
data (arr): data stored in the stack
minimum (arr): minimum values of data stored
"""
self.data = []
self.minimum = []
def empty(self):
"""
Returns whether or not the stack is empty.
Time Complexity: O(1)
Returns:
bool: whether or not the stack is empty
"""
return len(self.data) == 0
# MASKED: push function (lines 31-42)
def pop(self):
"""
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
"""
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
def peek(self):
"""
Returns the last item on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.data[-1]
def peek_min(self):
"""
Returns the min on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.minimum[-1]
|
def push(self, x):
"""
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
"""
self.data.append(x)
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
| 31
| 42
|
"""
Min Stack
-----
A LIFO abstract data type that serves as a collection of elements.
Supports retrieving the min from the stack in constant time.
"""
class MinStack(object):
def __init__(self):
"""
Attributes:
data (arr): data stored in the stack
minimum (arr): minimum values of data stored
"""
self.data = []
self.minimum = []
def empty(self):
"""
Returns whether or not the stack is empty.
Time Complexity: O(1)
Returns:
bool: whether or not the stack is empty
"""
return len(self.data) == 0
def push(self, x):
"""
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
"""
self.data.append(x)
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
def pop(self):
"""
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
"""
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
def peek(self):
"""
Returns the last item on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.data[-1]
def peek_min(self):
"""
Returns the min on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.minimum[-1]
|
pop
|
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
|
"""
Min Stack
-----
A LIFO abstract data type that serves as a collection of elements.
Supports retrieving the min from the stack in constant time.
"""
class MinStack(object):
def __init__(self):
"""
Attributes:
data (arr): data stored in the stack
minimum (arr): minimum values of data stored
"""
self.data = []
self.minimum = []
def empty(self):
"""
Returns whether or not the stack is empty.
Time Complexity: O(1)
Returns:
bool: whether or not the stack is empty
"""
return len(self.data) == 0
def push(self, x):
"""
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
"""
self.data.append(x)
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
# MASKED: pop function (lines 44-57)
def peek(self):
"""
Returns the last item on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.data[-1]
def peek_min(self):
"""
Returns the min on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.minimum[-1]
|
def pop(self):
"""
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
"""
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
| 44
| 57
|
"""
Min Stack
-----
A LIFO abstract data type that serves as a collection of elements.
Supports retrieving the min from the stack in constant time.
"""
class MinStack(object):
def __init__(self):
"""
Attributes:
data (arr): data stored in the stack
minimum (arr): minimum values of data stored
"""
self.data = []
self.minimum = []
def empty(self):
"""
Returns whether or not the stack is empty.
Time Complexity: O(1)
Returns:
bool: whether or not the stack is empty
"""
return len(self.data) == 0
def push(self, x):
"""
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
"""
self.data.append(x)
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
def pop(self):
"""
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
"""
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
def peek(self):
"""
Returns the last item on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.data[-1]
def peek_min(self):
"""
Returns the min on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.minimum[-1]
|
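A short usage sketch (not from the source) of the MinStack class above, showing the constant-time minimum invariant: duplicates of the current minimum are pushed onto the auxiliary list, so popping one duplicate keeps the other.

s = MinStack()
for x in (5, 3, 7, 3):
    s.push(x)            # minimum list grows to [5, 3, 3]
print(s.peek_min())      # 3
s.pop()                  # pops 3; one duplicate of the min remains
print(s.peek_min())      # 3
s.pop()                  # pops 7; minimum list untouched
s.pop()                  # pops 3; minimum list shrinks to [5]
print(s.peek_min())      # 5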
filterCanny
|
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axes to detect whether edges are horizontal, vertical, or diagonal
- Non-maximum suppression - this thins the detected edges by keeping only the local maxima of the gradient magnitude
- Hysteresis thresholding
|
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter; we will use a 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size, the less visible the blur.)
To populate the Gaussian matrix we will use a kernel of normally distributed [stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboring pixels.
The Gaussian distribution:
Gd = (1/(2*pi*stdv^2)) * exp(-(((i-(k+1))^2 + (j-(k+1))^2) / (2*stdv^2)))
i,j in [1, 2k+1] for a kernel of size (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
# MASKED: filterCanny function (lines 28-52)
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
# Fill poly-function deals with multiple polygon
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
Note: By looking at the slope of a line we can see if it is on the left side (m<0) or right (m>0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
left_lines_avg = np.average(left_lines,axis=0)
right_lines_avg = np.average(right_lines,axis=0)
if len(left_lines) > 0:
left_line = calculateCoordinates(img,left_lines_avg)
lane_lines.append(left_line)
if len(right_lines) > 0:
right_line = calculateCoordinates(img,right_lines_avg)
lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
# find the steering angle, i.e. the angle between the navigation direction and the end of the center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Use the last steering angle to stabilize the new one.
This can be improved to use the last N angles, etc.
If the new angle is too different from the current angle, only turn by max_angle_deviation degrees.
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
|
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axes to detect whether edges are horizontal, vertical, or diagonal
- Non-maximum suppression - this thins the detected edges by keeping only the local maxima of the gradient magnitude
- Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
| 28
| 52
|
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter; we will use a 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size, the less visible the blur.)
To populate the Gaussian matrix we will use a kernel of normally distributed [stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboring pixels.
The Gaussian distribution:
Gd = (1/(2*pi*stdv^2)) * exp(-(((i-(k+1))^2 + (j-(k+1))^2) / (2*stdv^2)))
i,j in [1, 2k+1] for a kernel of size (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axes to detect whether edges are horizontal, vertical, or diagonal
- Non-maximum suppression - this thins the detected edges by keeping only the local maxima of the gradient magnitude
- Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
# Fill poly-function deals with multiple polygon
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
Note: By looking at the slope of a line we can see if it is on the left side (m<0) or right (m>0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
left_lines_avg = np.average(left_lines,axis=0)
right_lines_avg = np.average(right_lines,axis=0)
if len(left_lines) > 0:
left_line = calculateCoordinates(img,left_lines_avg)
lane_lines.append(left_line)
if len(right_lines) > 0:
right_line = calculateCoordinates(img,right_lines_avg)
lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
# find the steering angle, i.e. the angle between the navigation direction and the end of the center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Use the last steering angle to stabilize the new one.
This can be improved to use the last N angles, etc.
If the new angle is too different from the current angle, only turn by max_angle_deviation degrees.
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
|
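For reference, a standalone sketch of the same Canny pipeline using only OpenCV; the isCV/toGrayScale helpers from server.cv_utils are replaced with cv2 calls here, and 'road.jpg' is a hypothetical input path.

import cv2 as cv

img = cv.imread('road.jpg')                 # hypothetical input image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
blurred = cv.GaussianBlur(gray, (5, 5), 0)
# Gradients above 150 are strong edges; gradients between 50 and 150
# are kept only when connected to a strong edge (hysteresis).
edges = cv.Canny(blurred, 50, 150)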
houghFilter
|
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
|
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter; we will use a 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size, the less visible the blur.)
To populate the Gaussian matrix we will use a kernel of normally distributed [stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboring pixels.
The Gaussian distribution:
Gd = (1/(2*pi*stdv^2)) * exp(-(((i-(k+1))^2 + (j-(k+1))^2) / (2*stdv^2)))
i,j in [1, 2k+1] for a kernel of size (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axes to detect whether edges are horizontal, vertical, or diagonal
- Non-maximum suppression - this thins the detected edges by keeping only the local maxima of the gradient magnitude
- Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
# Fill poly-function deals with multiple polygon
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
# MASKED: houghFilter function (lines 71-83)
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
Note: By looking at the slope of a line we can see if it is on the left side (m<0) or right (m>0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
left_lines_avg = np.average(left_lines,axis=0)
right_lines_avg = np.average(right_lines,axis=0)
if len(left_lines) > 0:
left_line = calculateCoordinates(img,left_lines_avg)
lane_lines.append(left_line)
if len(right_lines) > 0:
right_line = calculateCoordinates(img,right_lines_avg)
lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
# find the steering angle, i.e. the angle between the navigation direction and the end of the center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Use the last steering angle to stabilize the new one.
This can be improved to use the last N angles, etc.
If the new angle is too different from the current angle, only turn by max_angle_deviation degrees.
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
|
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
| 71
| 83
|
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter; we will use a 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size, the less visible the blur.)
To populate the Gaussian matrix we will use a kernel of normally distributed [stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboring pixels.
The Gaussian distribution:
Gd = (1/(2*pi*stdv^2)) * exp(-(((i-(k+1))^2 + (j-(k+1))^2) / (2*stdv^2)))
i,j in [1, 2k+1] for a kernel of size (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axes to detect whether edges are horizontal, vertical, or diagonal
- Non-maximum suppression - this thins the detected edges by keeping only the local maxima of the gradient magnitude
- Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
# Fill poly-function deals with multiple polygon
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
Note: By looking at the slope of a line we can see if it is on the left side (m<0) or right (m>0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
left_lines_avg = np.average(left_lines,axis=0)
right_lines_avg = np.average(right_lines,axis=0)
if len(left_lines) > 0:
left_line = calculateCoordinates(img,left_lines_avg)
lane_lines.append(left_line)
if len(right_lines) > 0:
right_line = calculateCoordinates(img,right_lines_avg)
lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
# find the steering angle, i.e. the angle between the navigation direction and the end of the center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Use the last steering angle to stabilize the new one.
This can be improved to use the last N angles, etc.
If the new angle is too different from the current angle, only turn by max_angle_deviation degrees.
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
|
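A standalone sketch (synthetic image, hypothetical offsets) tying together the probabilistic Hough transform parameters above and the steering-angle arithmetic from calculateSteeringAngle.

import math
import numpy as np
import cv2 as cv

edges = np.zeros((480, 640), dtype=np.uint8)
cv.line(edges, (100, 479), (250, 240), 255, 2)   # synthetic lane edge
lines = cv.HoughLinesP(edges, 2, np.pi / 180, 50,
                       minLineLength=30, maxLineGap=5)
print(None if lines is None else lines.shape)    # e.g. (n, 1, 4) segments

# A lane mid-point offset of 50 px measured at half the frame height
# (240 px) maps to roughly an 11-degree correction around "straight":
x_offset, y_offset = 50, 240
steering_angle = int(math.atan(x_offset / y_offset) * 180.0 / math.pi) + 90
print(steering_angle)                            # 101 -> slight right turn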
ValidateSingleFile
|
Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable, which is a list of output_api results.
Returns:
A boolean that is True if a histograms.xml or enums.xml file is changed.
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results):
"""Runs pretty-print command for specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit',
'--non-interactive'], cwd=cwd)
if exit_code != 0:
error_msg = (
'%s is not formatted correctly; run git cl format to fix.' % rel_path)
results.append(output_api.PresubmitError(error_msg))
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results):
"""Validates histogram prefixes in specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd)
if exit_code != 0:
error_msg = ('%s contains histogram(s) with disallowed prefix, please run '
'validate_prefix.py %s to fix.' % (rel_path, rel_path))
results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results):
"""Validates all histograms in the file are obsolete."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'histograms_xml/obsolete_histograms.xml contains non-obsolete '
'histograms, please run validate_obsolete_histograms.py to fix.')
results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results):
"""Validates histograms format and index file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_format.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms are not well-formatted; please run %s/validate_format.py '
'and fix the reported errors.' % cwd)
results.append(output_api.PresubmitError(error_msg))
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms index file is not up-to-date. Please run '
'%s/histogram_paths.py to update it' % cwd)
results.append(output_api.PresubmitError(error_msg))
# MASKED: ValidateSingleFile function (lines 67-112)
def CheckChange(input_api, output_api):
"""Checks that histograms.xml is pretty-printed and well-formatted."""
results = []
cwd = input_api.PresubmitLocalPath()
xml_changed = False
# Only for changed files, do corresponding checks if the file is
# histograms.xml, enums.xml or obsolete_histograms.xml.
for file_obj in input_api.AffectedTextFiles():
is_changed = ValidateSingleFile(
input_api, output_api, file_obj, cwd, results)
xml_changed = xml_changed or is_changed
# Run validate_format.py and validate_histograms_index.py, if changed files
# contain histograms.xml or enums.xml.
if xml_changed:
GetValidateHistogramsError(input_api, output_api, cwd, results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
"""Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable, which is a list of output_api results.
Returns:
A boolean that is True if a histograms.xml or enums.xml file is changed.
"""
p = file_obj.AbsoluteLocalPath()
# Only do PRESUBMIT checks when |p| is under |cwd|.
if input_api.os_path.commonprefix([p, cwd]) != cwd:
return False
filepath = input_api.os_path.relpath(p, cwd)
if 'test_data' in filepath:
return False
# If the changed file is obsolete_histograms.xml, validate all histograms
# inside are obsolete.
if 'obsolete_histograms.xml' in filepath:
GetObsoleteXmlErrors(input_api, output_api, cwd, results)
# Return false here because we don't need to validate format if users only
# change obsolete_histograms.xml.
return False
# If the changed file is histograms.xml or histogram_suffixes_list.xml,
# pretty-print and validate prefix it.
elif ('histograms.xml' in filepath
or 'histogram_suffixes_list.xml' in filepath):
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
# TODO(crbug/1120229): Re-enable validate prefix check once all histograms
# are split.
# GetPrefixErrors(input_api, output_api, cwd, filepath, results)
return True
# If the changed file is enums.xml, pretty-print it.
elif 'enums.xml' in filepath:
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
return True
return False
| 67
| 112
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results):
"""Runs pretty-print command for specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit',
'--non-interactive'], cwd=cwd)
if exit_code != 0:
error_msg = (
'%s is not formatted correctly; run git cl format to fix.' % rel_path)
results.append(output_api.PresubmitError(error_msg))
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results):
"""Validates histogram prefixes in specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd)
if exit_code != 0:
error_msg = ('%s contains histogram(s) with disallowed prefix, please run '
'validate_prefix.py %s to fix.' % (rel_path, rel_path))
results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results):
"""Validates all histograms in the file are obsolete."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'histograms_xml/obsolete_histograms.xml contains non-obsolete '
'histograms, please run validate_obsolete_histograms.py to fix.')
results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results):
"""Validates histograms format and index file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_format.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms are not well-formatted; please run %s/validate_format.py '
'and fix the reported errors.' % cwd)
results.append(output_api.PresubmitError(error_msg))
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms index file is not up-to-date. Please run '
'%s/histogram_paths.py to update it' % cwd)
results.append(output_api.PresubmitError(error_msg))
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
"""Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable, which is a list of output_api results.
Returns:
A boolean that is True if a histograms.xml or enums.xml file is changed.
"""
p = file_obj.AbsoluteLocalPath()
# Only do PRESUBMIT checks when |p| is under |cwd|.
if input_api.os_path.commonprefix([p, cwd]) != cwd:
return False
filepath = input_api.os_path.relpath(p, cwd)
if 'test_data' in filepath:
return False
# If the changed file is obsolete_histograms.xml, validate all histograms
# inside are obsolete.
if 'obsolete_histograms.xml' in filepath:
GetObsoleteXmlErrors(input_api, output_api, cwd, results)
# Return false here because we don't need to validate format if users only
# change obsolete_histograms.xml.
return False
# If the changed file is histograms.xml or histogram_suffixes_list.xml,
# pretty-print and validate prefix it.
elif ('histograms.xml' in filepath
or 'histogram_suffixes_list.xml' in filepath):
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
# TODO(crbug/1120229): Re-enable validate prefix check once all histograms
# are split.
# GetPrefixErrors(input_api, output_api, cwd, filepath, results)
return True
# If the changed file is enums.xml, pretty-print it.
elif 'enums.xml' in filepath:
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
return True
return False
def CheckChange(input_api, output_api):
"""Checks that histograms.xml is pretty-printed and well-formatted."""
results = []
cwd = input_api.PresubmitLocalPath()
xml_changed = False
# Only for changed files, do corresponding checks if the file is
# histograms.xml, enums.xml or obsolete_histograms.xml.
for file_obj in input_api.AffectedTextFiles():
is_changed = ValidateSingleFile(
input_api, output_api, file_obj, cwd, results)
xml_changed = xml_changed or is_changed
# Run validate_format.py and validate_histograms_index.py, if changed files
# contain histograms.xml or enums.xml.
if xml_changed:
GetValidateHistogramsError(input_api, output_api, cwd, results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
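A standalone illustration (plain os.path, hypothetical paths, outside the presubmit API) of the path check at the top of ValidateSingleFile: a file is only validated when its absolute path lies under the presubmit directory.

import os.path

cwd = '/src/tools/metrics/histograms'
inside = '/src/tools/metrics/histograms/enums.xml'
outside = '/src/chrome/browser/foo.cc'

print(os.path.commonprefix([inside, cwd]) == cwd)    # True  -> validated
print(os.path.commonprefix([outside, cwd]) == cwd)   # False -> skipped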
__init__
|
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
|
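A minimal usage sketch (hypothetical app and model names) of the constructor parameters documented above, wiring a view into a Flask-Admin instance.

from flask_admin import Admin
from flask_admin.consts import ICON_TYPE_GLYPH
from flask_admin.contrib.mongoengine import ModelView

# 'app' and 'User' are assumed to be an existing Flask application and
# MongoEngine document class:
# admin = Admin(app)
# admin.add_view(ModelView(User, name='Users', category='Auth',
#                          menu_icon_type=ICON_TYPE_GLYPH,
#                          menu_icon_value='glyphicon-user'))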
import logging
from flask import request, flash, abort, Response
from flask_admin import expose
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin._compat import iteritems, string_types
import mongoengine
import gridfs
from mongoengine.connection import get_db
from bson.objectid import ObjectId
from flask_admin.actions import action
from .filters import FilterConverter, BaseMongoEngineFilter
from .form import get_form, CustomModelConverter
from .typefmt import DEFAULT_FORMATTERS
from .tools import parse_like_term
from .helpers import format_error
from .ajax import process_ajax_references, create_ajax_loader
from .subdoc import convert_subdocuments
# Set up logger
log = logging.getLogger("flask-admin.mongo")
SORTABLE_FIELDS = set((
mongoengine.StringField,
mongoengine.IntField,
mongoengine.FloatField,
mongoengine.BooleanField,
mongoengine.DateTimeField,
mongoengine.ComplexDateTimeField,
mongoengine.ObjectIdField,
mongoengine.DecimalField,
mongoengine.ReferenceField,
mongoengine.EmailField,
mongoengine.UUIDField,
mongoengine.URLField
))
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.mongoengine.filters.BaseFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = CustomModelConverter
"""
Model form conversion class. Use this to implement custom
field conversion logic.
Custom class should be derived from the
`flask_admin.contrib.mongoengine.form.CustomModelConverter`.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
object_id_converter = ObjectId
"""
Mongodb ``_id`` value conversion function. Default is `bson.ObjectId`.
Use this if you are using String, Binary, etc.
For example::
class MyModelView(BaseModelView):
object_id_converter = int
or::
class MyModelView(BaseModelView):
object_id_converter = str
"""
filter_converter = FilterConverter()
"""
Field to filter converter.
Override this attribute to use a non-default converter.
"""
column_type_formatters = DEFAULT_FORMATTERS
"""
Customized type formatters for MongoEngine backend
"""
allowed_search_types = (mongoengine.StringField,
mongoengine.URLField,
mongoengine.EmailField)
"""
List of allowed search field types.
"""
form_subdocuments = None
"""
Subdocument configuration options.
This field accepts a dictionary, where the key is a field name and the value is either a dictionary or an
instance of `flask_admin.contrib.EmbeddedForm`.
Consider the following example::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.EmbeddedDocumentField(Comment)
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_columns': ('name',)
}
}
In this example, the `Post` model has a child `Comment` subdocument. When generating the form for the
`Comment` embedded document, Flask-Admin will only create the `name` field.
It is also possible to use class-based embedded document configuration::
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
Arbitrary depth nesting is supported::
class SomeEmbed(EmbeddedForm):
form_excluded_columns = ('test',)
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
form_subdocuments = {
'inner': SomeEmbed()
}
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
There's also support for forms embedded into a `ListField`. All you have
to do is create a nested rule with `None` as the name. It is slightly
confusing, but that's how Flask-MongoEngine creates form fields
embedded into a ListField::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.ListField(db.EmbeddedDocumentField(Comment))
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
"""
# MASKED: __init__ function (lines 202-238)
def _refresh_cache(self):
"""
Refresh cache.
"""
# Process subdocuments
if self.form_subdocuments is None:
self.form_subdocuments = {}
self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
# Cache other properties
super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
"""
AJAX endpoint is exposed by top-level admin view class, but
subdocuments might have AJAX references too.
This method will recursively go over subdocument configuration
and will precompute AJAX references for them ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
"""
references = super(ModelView, self)._process_ajax_references()
return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
"""
Inspect model and return list of model fields
:param model:
Model to inspect
"""
if model is None:
model = self.model
return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter)
def scaffold_pk(self):
# MongoEngine models have predefined 'id' as a key
return 'id'
def get_pk_value(self, model):
"""
Return the primary key value from the model instance
:param model:
Model instance
"""
return model.pk
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
columns = []
for n, f in self._get_model_fields():
# Verify type
field_class = type(f)
if (field_class == mongoengine.ListField and
isinstance(f.field, mongoengine.EmbeddedDocumentField)):
continue
if field_class == mongoengine.EmbeddedDocumentField:
continue
if self.column_display_pk or field_class != mongoengine.ObjectIdField:
columns.append(n)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns (name, field)
"""
columns = {}
for n, f in self._get_model_fields():
if type(f) in SORTABLE_FIELDS:
if self.column_display_pk or type(f) != mongoengine.ObjectIdField:
columns[n] = f
return columns
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if isinstance(p, string_types):
p = self.model._fields.get(p)
if p is None:
raise Exception('Invalid search field')
field_type = type(p)
# Check type
if (field_type not in self.allowed_search_types):
raise Exception('Can only search on text columns. ' +
'Failed to set up search for "%s"' % p)
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, name):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
if isinstance(name, string_types):
attr = self.model._fields.get(name)
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Find name
visible_name = None
if not isinstance(name, string_types):
visible_name = self.get_column_name(attr.name)
if not visible_name:
visible_name = self.get_column_name(name)
# Convert filter
type_name = type(attr).__name__
flt = self.filter_converter.convert(type_name,
attr,
visible_name)
return flt
def is_valid_filter(self, filter):
"""
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
# AJAX foreignkey support
def _create_ajax_loader(self, name, opts):
return create_ajax_loader(self.model, name, name, opts)
def get_query(self):
"""
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
"""
return self.model.objects
def _search(self, query, search_term):
# TODO: Unfortunately, MongoEngine contains a bug which
# prevents running complex Q queries and, as a result,
# Flask-Admin does not support per-word searching as
# in other backends
op, term = parse_like_term(search_term)
criteria = None
for field in self._search_fields:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
if criteria is None:
criteria = q
else:
criteria |= q
return query.filter(criteria)
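# Aside: a standalone sketch of the OR-combined Q pattern used above
# (assumptions: a MongoEngine `Post` document with `title` and `body`
# StringFields exists; the names are illustrative, not part of this file).
def _search_posts_sketch(term):
    criteria = None
    for field_name in ('title', 'body'):
        q = mongoengine.Q(**{'%s__icontains' % field_name: term})
        criteria = q if criteria is None else (criteria | q)
    return Post.objects.filter(criteria)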
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = self.get_query()
# Filters
if self._filters:
for flt, flt_name, value in filters:
f = self._filters[flt]
query = f.apply(query, f.clean(value))
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = query.count() if not self.simple_list_pager else None
# Sorting
if sort_column:
query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
else:
order = self._get_default_order()
if order:
query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
# Pagination
if page is not None:
query = query.skip(page * self.page_size)
query = query.limit(self.page_size)
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model instance by its ID
:param id:
Model ID
"""
try:
return self.get_query().filter(pk=id).first()
except mongoengine.ValidationError as ex:
flash(gettext('Failed to get model. %(error)s',
error=format_error(ex)),
'error')
return None
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self._on_model_change(form, model, True)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
self.on_model_delete(model)
model.delete()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# FileField access API
@expose('/api/file/')
def api_file_view(self):
pk = request.args.get('id')
coll = request.args.get('coll')
db = request.args.get('db', 'default')
if not pk or not coll or not db:
abort(404)
fs = gridfs.GridFS(get_db(db), coll)
data = fs.get(self.object_id_converter(pk))
if not data:
abort(404)
return Response(data.read(),
content_type=data.content_type,
headers={
'Content-Length': data.length
})
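# Aside: the GridFS lookup above, written out directly (assumptions: a
# default MongoEngine connection and an 'images' GridFS collection exist;
# the request shape is GET .../api/file/?id=<ObjectId>&coll=images&db=default).
def _gridfs_fetch_sketch(pk):
    fs = gridfs.GridFS(get_db('default'), 'images')
    grid_out = fs.get(ObjectId(pk))
    return grid_out.read(), grid_out.content_type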
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
all_ids = [self.object_id_converter(pk) for pk in ids]
for obj in self.get_query().in_bulk(all_ids).values():
count += self.delete_model(obj)
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete records. %(error)s', error=str(ex)),
'error')
|
def __init__(self, model, name=None,
category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._primary_key = self.scaffold_pk()
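For orientation, a minimal usage sketch of this constructor (assumptions: Flask, Flask-Admin, and a MongoEngine `Post` document are available; the names below are illustrative, not part of the record above):
from flask import Flask
from flask_admin import Admin
from flask_admin.contrib.mongoengine import ModelView

app = Flask(__name__)
admin = Admin(app, name='example')
# Instantiating the view runs the constructor above, which initializes
# the search field list and records the primary key via scaffold_pk().
admin.add_view(ModelView(Post, name='Posts'))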
| 202
| 238
|
import logging
from flask import request, flash, abort, Response
from flask_admin import expose
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin._compat import iteritems, string_types
import mongoengine
import gridfs
from mongoengine.connection import get_db
from bson.objectid import ObjectId
from flask_admin.actions import action
from .filters import FilterConverter, BaseMongoEngineFilter
from .form import get_form, CustomModelConverter
from .typefmt import DEFAULT_FORMATTERS
from .tools import parse_like_term
from .helpers import format_error
from .ajax import process_ajax_references, create_ajax_loader
from .subdoc import convert_subdocuments
# Set up logger
log = logging.getLogger("flask-admin.mongo")
SORTABLE_FIELDS = set((
mongoengine.StringField,
mongoengine.IntField,
mongoengine.FloatField,
mongoengine.BooleanField,
mongoengine.DateTimeField,
mongoengine.ComplexDateTimeField,
mongoengine.ObjectIdField,
mongoengine.DecimalField,
mongoengine.ReferenceField,
mongoengine.EmailField,
mongoengine.UUIDField,
mongoengine.URLField
))
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.mongoengine.filters.BaseFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = CustomModelConverter
"""
Model form conversion class. Use this to implement custom
field conversion logic.
Custom class should be derived from the
`flask_admin.contrib.mongoengine.form.CustomModelConverter`.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
object_id_converter = ObjectId
"""
MongoDB ``_id`` value conversion function. Default is `bson.ObjectId`.
Use this if you are using String, Binary, etc.
For example::
class MyModelView(BaseModelView):
object_id_converter = int
or::
class MyModelView(BaseModelView):
object_id_converter = str
"""
filter_converter = FilterConverter()
"""
Field to filter converter.
Override this attribute to use a non-default converter.
"""
column_type_formatters = DEFAULT_FORMATTERS
"""
Customized type formatters for MongoEngine backend
"""
allowed_search_types = (mongoengine.StringField,
mongoengine.URLField,
mongoengine.EmailField)
"""
List of allowed search field types.
"""
form_subdocuments = None
"""
Subdocument configuration options.
This field accepts a dictionary, where the key is a field name and the value is either a dictionary or an instance of
`flask_admin.contrib.EmbeddedForm`.
Consider the following example::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.EmbeddedDocumentField(Comment)
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_columns': ('name',)
}
}
In this example, the `Post` model has a child `Comment` subdocument. When generating a form for the `Comment` embedded
document, Flask-Admin will only create the `name` field.
It is also possible to use class-based embedded document configuration::
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
Arbitrary depth nesting is supported::
class SomeEmbed(EmbeddedForm):
form_excluded_columns = ('test',)
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
form_subdocuments = {
'inner': SomeEmbed()
}
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
There is also support for forms embedded into a `ListField`. All you have
to do is create a nested rule with `None` as the name. Even though it
is slightly confusing, that is how Flask-MongoEngine creates
form fields embedded into a ListField::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.ListField(db.EmbeddedDocumentField(Comment))
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
"""
def __init__(self, model, name=None,
category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._primary_key = self.scaffold_pk()
def _refresh_cache(self):
"""
Refresh cache.
"""
# Process subdocuments
if self.form_subdocuments is None:
self.form_subdocuments = {}
self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
# Cache other properties
super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
"""
The AJAX endpoint is exposed by the top-level admin view class, but
subdocuments might have AJAX references too.
This method recursively goes over the subdocument configuration
and precomputes AJAX references for them, ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
"""
references = super(ModelView, self)._process_ajax_references()
return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
"""
Inspect model and return list of model fields
:param model:
Model to inspect
"""
if model is None:
model = self.model
return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter)
def scaffold_pk(self):
# MongoEngine models have predefined 'id' as a key
return 'id'
def get_pk_value(self, model):
"""
Return the primary key value from the model instance
:param model:
Model instance
"""
return model.pk
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
columns = []
for n, f in self._get_model_fields():
# Verify type
field_class = type(f)
if (field_class == mongoengine.ListField and
isinstance(f.field, mongoengine.EmbeddedDocumentField)):
continue
if field_class == mongoengine.EmbeddedDocumentField:
continue
if self.column_display_pk or field_class != mongoengine.ObjectIdField:
columns.append(n)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns (name, field)
"""
columns = {}
for n, f in self._get_model_fields():
if type(f) in SORTABLE_FIELDS:
if self.column_display_pk or type(f) != mongoengine.ObjectIdField:
columns[n] = f
return columns
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if isinstance(p, string_types):
p = self.model._fields.get(p)
if p is None:
raise Exception('Invalid search field')
field_type = type(p)
# Check type
if (field_type not in self.allowed_search_types):
raise Exception('Can only search on text columns. ' +
'Failed to set up search for "%s"' % p)
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, name):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
if isinstance(name, string_types):
attr = self.model._fields.get(name)
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Find name
visible_name = None
if not isinstance(name, string_types):
visible_name = self.get_column_name(attr.name)
if not visible_name:
visible_name = self.get_column_name(name)
# Convert filter
type_name = type(attr).__name__
flt = self.filter_converter.convert(type_name,
attr,
visible_name)
return flt
def is_valid_filter(self, filter):
"""
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
# AJAX foreignkey support
def _create_ajax_loader(self, name, opts):
return create_ajax_loader(self.model, name, name, opts)
def get_query(self):
"""
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
"""
return self.model.objects
def _search(self, query, search_term):
# TODO: Unfortunately, MongoEngine contains a bug which
# prevents running complex Q queries and, as a result,
# Flask-Admin does not support per-word searching as
# in other backends
op, term = parse_like_term(search_term)
criteria = None
for field in self._search_fields:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
if criteria is None:
criteria = q
else:
criteria |= q
return query.filter(criteria)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = self.get_query()
# Filters
if self._filters:
for flt, flt_name, value in filters:
f = self._filters[flt]
query = f.apply(query, f.clean(value))
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = query.count() if not self.simple_list_pager else None
# Sorting
if sort_column:
query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
else:
order = self._get_default_order()
if order:
query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
# Pagination
if page is not None:
query = query.skip(page * self.page_size)
query = query.limit(self.page_size)
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model instance by its ID
:param id:
Model ID
"""
try:
return self.get_query().filter(pk=id).first()
except mongoengine.ValidationError as ex:
flash(gettext('Failed to get model. %(error)s',
error=format_error(ex)),
'error')
return None
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self._on_model_change(form, model, True)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
self.on_model_delete(model)
model.delete()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# FileField access API
@expose('/api/file/')
def api_file_view(self):
pk = request.args.get('id')
coll = request.args.get('coll')
db = request.args.get('db', 'default')
if not pk or not coll or not db:
abort(404)
fs = gridfs.GridFS(get_db(db), coll)
data = fs.get(self.object_id_converter(pk))
if not data:
abort(404)
return Response(data.read(),
content_type=data.content_type,
headers={
'Content-Length': data.length
})
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
all_ids = [self.object_id_converter(pk) for pk in ids]
for obj in self.get_query().in_bulk(all_ids).values():
count += self.delete_model(obj)
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete records. %(error)s', error=str(ex)),
'error')
|
_from_openapi_data
|
CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
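A generic sketch of the keyword-handling contract this docstring describes (plain Python, not the generated cryptoapis code; the names are illustrative):
def build_model(*args, **kwargs):
    # Pop the bookkeeping keywords with their documented defaults.
    check_type = kwargs.pop('_check_type', True)
    path_to_item = kwargs.pop('_path_to_item', ())
    visited = kwargs.pop('_visited_composed_classes', ())
    if args:
        # Positional arguments are rejected, mirroring ApiTypeError.
        raise TypeError('positional arguments are not accepted: %r' % (args,))
    # Remaining kwargs are treated as model fields.
    return {'check_type': check_type, 'path': path_to_item,
            'visited': visited, 'fields': kwargs}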
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.coins_forwarding_success_data_item import CoinsForwardingSuccessDataItem
globals()['CoinsForwardingSuccessDataItem'] = CoinsForwardingSuccessDataItem
class CoinsForwardingSuccessData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the json key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'product': (str,), # noqa: E501
'event': (str,), # noqa: E501
'item': (CoinsForwardingSuccessDataItem,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'product': 'product', # noqa: E501
'event': 'event', # noqa: E501
'item': 'item', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
# MASKED: _from_openapi_data function (lines 112-191)
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
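Note the `super(OpenApiModel, cls).__new__(cls)` call above: unlike `__init__`, this classmethod allocates the instance without running a constructor, which is what allows it to set read-only attributes during deserialization. A generic sketch of that pattern (plain Python, illustrative only):
class Record:
    def __init__(self):
        raise RuntimeError('use Record.from_data()')
    @classmethod
    def from_data(cls, **data):
        self = object.__new__(cls)   # allocate without calling __init__
        self.__dict__.update(data)   # set attributes directly
        return self

r = Record.from_data(product='x', event='y')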
| 112
| 191
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.coins_forwarding_success_data_item import CoinsForwardingSuccessDataItem
globals()['CoinsForwardingSuccessDataItem'] = CoinsForwardingSuccessDataItem
class CoinsForwardingSuccessData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the json key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'product': (str,), # noqa: E501
'event': (str,), # noqa: E501
'item': (CoinsForwardingSuccessDataItem,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'product': 'product', # noqa: E501
'event': 'event', # noqa: E501
'item': 'item', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted.
If omitted, no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
_activation_summary
|
Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
# MASKED: _activation_summary function (lines 79-95)
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
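# Aside: a sketch of how the 'losses' collection accumulates the decay
# terms created above (TF1 graph API; the variable name is illustrative).
def _weight_decay_sketch():
    w = _variable_with_weight_decay('w_demo', [10, 10], stddev=0.1, wd=0.004)
    # Each wd-bearing variable contributed wd * l2_loss(var) to 'losses'.
    return tf.add_n(tf.get_collection('losses'), name='total_weight_decay')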
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
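# Aside: a sketch of the cross-tower variable sharing that tf.get_variable
# enables (TF1 graph API; images_a/images_b are illustrative placeholders).
def _shared_towers_sketch(images_a, images_b):
    logits_a = inference(images_a)
    tf.get_variable_scope().reuse_variables()
    logits_b = inference(images_b)  # reuses the weights created for tower a
    return logits_a, logits_b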
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
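# Aside: wiring the pieces together as the module docstring describes
# (sketch; assumes the CIFAR-10 binaries are available under FLAGS.data_dir).
def _train_graph_sketch():
    global_step = tf.train.get_or_create_global_step()
    images, labels = distorted_inputs()
    logits = inference(images)
    total_loss = loss(logits, labels)
    return train(total_loss, global_step)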
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
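A short usage sketch (TF1-style graph API matching the file above; the scope and tensor are illustrative):
with tf.variable_scope('tower_0'):
    x = tf.nn.relu(tf.random_normal([128, 64]), name='demo')
_activation_summary(x)  # summaries are named 'demo/...' with 'tower_0/' stripped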
| 79
| 95
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on TensorBoard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
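# Hedged illustration (not in the original comments): for an activation op
# named 'tower_0/conv1', the re.sub above strips the tower prefix, so the
# summaries are recorded as 'conv1/activations' and 'conv1/sparsity'.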
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
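# A minimal, never-called sketch (an assumption, not part of the original
# tutorial; the name _demo_weight_decay_sketch is hypothetical) of what a
# non-None wd contributes: 0.004 * tf.nn.l2_loss(w), i.e. 0.002 * sum(w**2),
# is placed in the 'losses' collection for loss() to sum with tf.add_n.
def _demo_weight_decay_sketch():
  w = _variable_with_weight_decay('demo_weights', shape=[2, 2],
                                  stddev=0.1, wd=0.004)
  decay_terms = tf.get_collection('losses')  # includes the 0.004 * L2 term
  return w, decay_terms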
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # Linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
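  # Hedged worked example (not in the original comments): with the defaults
  # above, num_batches_per_epoch = 50000 / 128 = 390.625, so
  # decay_steps = int(390.625 * 350) = 136718; with staircase=True the rate
  # stays at 0.1 until step 136718, then drops to 0.01, 0.001, and so on.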
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
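# Hedged usage note (not in the original file): maybe_download_and_extract()
# is meant to run once, before any input pipeline is built, e.g.
#   maybe_download_and_extract()
#   images, labels = distorted_inputs()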
|
_variable_with_weight_decay
|
Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
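For concreteness, a hedged sketch of the contract above (the shape, stddev
and wd values are illustrative, borrowed from the local3 layer; dim is
assumed to be defined by the caller):

    # Adds 0.004 * tf.nn.l2_loss(weights) to the 'losses' collection.
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)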
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on TensorBoard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
# MASKED: _variable_with_weight_decay function (lines 115-139)
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # Linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
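# Worked composition (a hedged note, not in the original file): every
# variable created with a non-None wd contributes wd * l2_loss(var) to
# 'losses', so the value returned above is
#   total_loss = cross_entropy_mean
#              + 0.004 * l2_loss(local3/weights)
#              + 0.004 * l2_loss(local4/weights)
#              + zero-valued terms from the layers created with wd=0.0.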
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
| 115
| 139
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on TensorBoard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # Linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
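# Hedged note on the naming scheme above: each loss therefore appears twice
# in TensorBoard, e.g. 'total_loss (raw)' for the noisy per-step value and
# 'total_loss' for its 0.9-decay exponential moving average.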
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
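# A minimal, hypothetical driver (an assumption: TF1 graph mode, with data
# already fetched by maybe_download_and_extract()); it sketches how the
# functions in this file are typically wired together and is not part of the
# original tutorial. MonitoredTrainingSession also starts the queue runners
# that feed distorted_inputs().
def _demo_train_loop(num_steps=10):
  with tf.Graph().as_default():
    global_step = tf.train.get_or_create_global_step()
    images, labels = distorted_inputs()
    logits = inference(images)
    total_loss = loss(logits, labels)
    train_op = train(total_loss, global_step)
    with tf.train.MonitoredTrainingSession() as sess:
      for _ in range(num_steps):
        sess.run(train_op)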
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
distorted_inputs
|
Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
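A hedged usage sketch of this contract (assumes FLAGS.data_dir points at
the extracted CIFAR-10 binaries and that queue runners have been started,
e.g. via tf.train.start_queue_runners):

    images, labels = distorted_inputs()
    # images: [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] float tensor
    # labels: [batch_size] labels, int32 unless FLAGS.use_fp16 casts them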
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on TensorBoard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
# MASKED: distorted_inputs function (lines 142-160)
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # Linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
| 142
| 160
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on TensorBoard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
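# Usage sketch (not part of the original tutorial file): the functions above are
# meant to compose into a single training graph under TF1 semantics. Assuming this
# module is importable as `cifar10` and the flags keep their defaults:
import tensorflow as tf
import cifar10  # the module defined above (name assumed)

cifar10.maybe_download_and_extract()
with tf.Graph().as_default():
  global_step = tf.train.get_or_create_global_step()
  images, labels = cifar10.distorted_inputs()        # augmented training batches
  logits = cifar10.inference(images)                 # forward pass to logits
  total_loss = cifar10.loss(logits, labels)          # cross entropy + weight decay
  train_op = cifar10.train(total_loss, global_step)
  # train_op would then be run repeatedly, e.g. inside tf.train.MonitoredTrainingSession.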
|
inputs
|
Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
# MASKED: inputs function (lines 163-185)
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
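# Usage sketch: the same helper serves both splits, toggled by eval_data
# (assumes FLAGS.data_dir already contains the extracted CIFAR-10 binaries).
eval_images, eval_labels = inputs(eval_data=True)     # held-out test split
train_images, train_labels = inputs(eval_data=False)  # training split, no augmentation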
| 163
| 185
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
  # linear layer (WX + b).
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
plot_ellipsoid_3D
|
Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
"""
import numpy as np
import numpy.linalg as nLa
from ..utils import unavailable
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except ImportError:
_has_matplotlib = False
# MASKED: plot_ellipsoid_3D function (lines 19-65)
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_2D(p, q, ax, n_points=100, color="r"):
""" Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
    p: 2x1 array[float]
        Center of the ellipsoid
    q: 2x2 array[float]
        Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
plt.sca(ax)
    r = nLa.cholesky(q).T  # checks spd inside the function
    t = np.linspace(0, 2 * np.pi, n_points)
    z = [np.cos(t), np.sin(t)]
    ellipse = np.dot(r, z) + p
handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)
return ax, handle
|
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_3D(p, q, ax, n_points=100):
""" Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
assert np.shape(p) == (3, 1), "p needs to be a 3x1 vector"
assert np.shape(q) == (3, 3), "q needs to be a spd 3x3 matrix"
    assert np.allclose(q, 0.5 * (q + q.T)), "q needs to be spd"
# transform to radius/center parametrization
    U, s, rotation = nLa.svd(q)  # numpy.linalg, imported above as nLa
assert np.all(s > 0), "q needs to be positive definite"
radii = 1.0 / np.sqrt(s)
# get x,y,z of sphere and transform
u = np.linspace(0.0, 2.0 * np.pi, n_points)
v = np.linspace(0.0, np.pi, n_points)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
                                                 rotation) + p.flatten()  # shift by the center p
# plot the result
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
return ax
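# Usage sketch (hypothetical values; needs matplotlib's 3D toolkit):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the '3d' projection

a = np.random.randn(3, 3)
q = np.dot(a, a.T) + 3.0 * np.eye(3)  # symmetric positive definite by construction
p = np.zeros((3, 1))                  # ellipsoid centered at the origin

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plot_ellipsoid_3D(p, q, ax, n_points=40)
plt.show()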
| 19
| 65
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
"""
import numpy as np
import numpy.linalg as nLa
from ..utils import unavailable
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except ImportError:
_has_matplotlib = False
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_3D(p, q, ax, n_points=100):
""" Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
assert np.shape(p) == (3, 1), "p needs to be a 3x1 vector"
assert np.shape(q) == (3, 3), "q needs to be a spd 3x3 matrix"
    assert np.allclose(q, 0.5 * (q + q.T)), "q needs to be spd"
# transform to radius/center parametrization
    U, s, rotation = nLa.svd(q)  # numpy.linalg, imported above as nLa
assert np.all(s > 0), "q needs to be positive definite"
radii = 1.0 / np.sqrt(s)
# get x,y,z of sphere and transform
u = np.linspace(0.0, 2.0 * np.pi, n_points)
v = np.linspace(0.0, np.pi, n_points)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
                                                 rotation) + p.flatten()  # shift by the center p
# plot the result
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
return ax
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_2D(p, q, ax, n_points=100, color="r"):
""" Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
    p: 2x1 array[float]
        Center of the ellipsoid
    q: 2x2 array[float]
        Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
plt.sca(ax)
    r = nLa.cholesky(q).T  # checks spd inside the function
    t = np.linspace(0, 2 * np.pi, n_points)
    z = [np.cos(t), np.sin(t)]
    ellipse = np.dot(r, z) + p
handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)
return ax, handle
|
plot_ellipsoid_2D
|
Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
p: 2x1 array[float]
Center of the ellipsoid
q: 2x2 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
"""
import numpy as np
import numpy.linalg as nLa
from ..utils import unavailable
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except ImportError:
_has_matplotlib = False
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_3D(p, q, ax, n_points=100):
""" Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
assert np.shape(p) == (3, 1), "p needs to be a 3x1 vector"
assert np.shape(q) == (3, 3), "q needs to be a spd 3x3 matrix"
    assert np.allclose(q, 0.5 * (q + q.T)), "q needs to be spd"
# transform to radius/center parametrization
    U, s, rotation = nLa.svd(q)  # numpy.linalg, imported above as nLa
assert np.all(s > 0), "q needs to be positive definite"
radii = 1.0 / np.sqrt(s)
# get x,y,z of sphere and transform
u = np.linspace(0.0, 2.0 * np.pi, n_points)
v = np.linspace(0.0, np.pi, n_points)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
                                                 rotation) + p.flatten()  # shift by the center p
# plot the result
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
return ax
# MASKED: plot_ellipsoid_2D function (lines 68-95)
|
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_2D(p, q, ax, n_points=100, color="r"):
""" Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
    p: 2x1 array[float]
        Center of the ellipsoid
    q: 2x2 array[float]
        Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
plt.sca(ax)
    r = nLa.cholesky(q).T  # checks spd inside the function
    t = np.linspace(0, 2 * np.pi, n_points)
    z = [np.cos(t), np.sin(t)]
    ellipse = np.dot(r, z) + p
handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)
return ax, handle
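# Usage sketch (hypothetical values): a 2x2 SPD shape matrix and a 2x1 center.
import numpy as np
import matplotlib.pyplot as plt

q = np.array([[2.0, 0.5],
              [0.5, 1.0]])
p = np.array([[1.0], [0.5]])

fig, ax = plt.subplots()
plot_ellipsoid_2D(p, q, ax, n_points=200, color="r")
plt.show()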
| 68
| 95
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
"""
import numpy as np
import numpy.linalg as nLa
from ..utils import unavailable
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except ImportError:
_has_matplotlib = False
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_3D(p, q, ax, n_points=100):
""" Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
assert np.shape(p) == (3, 1), "p needs to be a 3x1 vector"
assert np.shape(q) == (3, 3), "q needs to be a spd 3x3 matrix"
    assert np.allclose(q, 0.5 * (q + q.T)), "q needs to be spd"
# transform to radius/center parametrization
    U, s, rotation = nLa.svd(q)  # numpy.linalg, imported above as nLa
assert np.all(s > 0), "q needs to be positive definite"
radii = 1.0 / np.sqrt(s)
# get x,y,z of sphere and transform
u = np.linspace(0.0, 2.0 * np.pi, n_points)
v = np.linspace(0.0, np.pi, n_points)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
                                                 rotation) + p.flatten()  # shift by the center p
# plot the result
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
return ax
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_2D(p, q, ax, n_points=100, color="r"):
""" Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
    p: 2x1 array[float]
        Center of the ellipsoid
    q: 2x2 array[float]
        Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
plt.sca(ax)
    r = nLa.cholesky(q).T  # checks spd inside the function
    t = np.linspace(0, 2 * np.pi, n_points)
    z = [np.cos(t), np.sin(t)]
    ellipse = np.dot(r, z) + p
handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)
return ax, handle
|
__init__
|
Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
each atom there is an orthogonal 3x3 matrix denoting the ex,ey,ez
directions.
|
# TODO: By PySCF-1.5 release
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. code style
# * Indent: 3 -> 4
# * Constant should be all uppercase
# * Function/method should be all lowercase
# * Line wrap around 80 columns
# * Use either double quote or single quote, not mix
#
# 2. Conventions required by PySCF
# * Use PYSCF_TMPDIR to replace _TmpDir
#
# 3. Use proper functions provided by PySCF
#
# This file is adapted with permission from the wmme program of Gerald Knizia.
# See http://sites.psu.edu/knizia/software/
#====================================================
from __future__ import print_function
import numpy as np
from numpy import dot, array
from os import path
from sys import version_info
def GetModulePath():
# (hopefully) return the path of the .py file.
# idea is to leave wmme.py in the same directory as the wmme executable,
# and invoke the scripts that import it via, for example,
# PYTHONPATH=$HOME/dev/wmme:$PYTHONPATH python myscriptfile.py
import inspect
return path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))
if 0:
# set executable/basis library directory explicitly.
_WmmeDir = "/home/cgk/dev/wmme"
else:
# set executable/basis library from path of wmme.py
_WmmeDir = None
_TmpDir = None # if None: use operating system default
_BasisLibDir = None # if None: same as _WmmeDir/bases
#ToAng = 0.5291772108
ToAng = 0.529177209 # molpro default.
def ElementNameDummy():
ElementNames = "X H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn".split()
ElementNumbers = dict([(o,i) for (i,o) in enumerate(ElementNames)])
return ElementNames, ElementNumbers
ElementNames, ElementNumbers = ElementNameDummy()
def mdot(*args):
"""chained matrix product: mdot(A,B,C,..) = A*B*C*...
No attempt is made to optimize the contraction order."""
r = args[0]
for a in args[1:]:
r = dot(r,a)
return r
def dot2(A,B): return dot(A.flatten(),B.flatten())
def nCartY(l):
    return ((l + 1) * (l + 2)) // 2  # integer count of Cartesian components for angular momentum l
class FAtom(object):
def __init__(self, Element, Position, Index):
self.Element = Element
self.Pos = Position
self.Index = Index
@property
def Label(self):
# return element and center index combined.
return "%2s%3s"%(self.Element,1 + self.Index)
@property
def iElement(self):
return ElementNumbers[self.Element]
def __str__(self):
return "%s (%6.3f,%6.3f,%6.3f)"%(self.Label, self.Pos[0], self.Pos[1], self.Pos[2])
class FAtomSet(object):
# MASKED: __init__ function (lines 95-106)
def MakeXyz(self,NumFmt = "%15.8f",Scale=1.):
Lines = []
for i in range(len(self.Elements)):
Lines.append(" %5s {0} {0} {0}".format(NumFmt) % (\
self.Elements[i], Scale*self.Pos[0,i], Scale*self.Pos[1,i], Scale*self.Pos[2,i]))
return "\n".join(Lines)
def nElecNeutral(self):
"""return number of electrons present in the total system if neutral."""
return sum([ElementNumbers[o] for o in self.Elements])
def fCoreRepulsion1(self, iAt, jAt):
if iAt == jAt: return 0. # <- a core doesn't repulse itself.
ChA, ChB = [ElementNumbers[self.Elements[o]] for o in [iAt, jAt]]
return ChA * ChB / np.sum((self.Pos[:,iAt] - self.Pos[:,jAt])**2)**.5
def fCoreRepulsion(self):
N = len(self.Elements)
Charges = array([ElementNumbers[o] for o in self.Elements])
fCoreEnergy = 0
for i in range(N):
for j in range(i):
fCoreEnergy += self.fCoreRepulsion1(i,j)
#fCoreEnergy += Charges[i] * Charges[j] / np.sum((self.Pos[:,i] - self.Pos[:,j])**2)**.5
return fCoreEnergy
def __str__(self):
Caption = " %5s%15s %15s %15s" % ("ATOM", "POS/X", "POS/Y", "POS/Z")
return Caption + "\n" + self.MakeXyz()
def __len__(self): return len(self.Elements)
def __getitem__(self,key): return FAtom(self.Elements[key], self.Pos[:,key], key)
def __iter__(self):
for (iAt,(Type,Xyz)) in enumerate(zip(self.Elements, self.Pos.T)):
#yield (Type,Xyz)
yield FAtom(Type, Xyz, iAt)
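# Usage sketch (hedged): per the record's docstring, the masked __init__ takes a
# 3 x nAtom position matrix in atomic units and a matching list of element names;
# the argument order and names are assumptions here, not confirmed by this file.
#
#   Pos = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 1.4]])  # two H atoms 1.4 a.u. apart
#   Atoms = FAtomSet(Pos, ["H", "H"])
#   print(Atoms.nElecNeutral())  # -> 2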
class FBasisShell(object):
"""A generally contracted shell of spherical harmonic basis functions."""
def __init__(self, l, Exp, Co):
self.l = l
assert(isinstance(l,int) and l >= 0 and l <= 8)
self.Exp = np.array(Exp)
assert(self.Exp.ndim == 1)
self.Co = np.array(Co)
assert(self.Co.ndim == 2 and self.Co.shape[0] == len(self.Exp))
self.Element = None # designated element for the basis function
self.Comment = None # comment on the basis function (e.g., literature reference)
@property
def nExp(self):
return len(self.Exp)
@property
def nCo(self):
return self.Co.shape[1]
@property
def nFn(self):
return self.nCo * (2*self.l + 1)
@property
def nFnCa(self):
return self.nCo * nCartY(self.l)
@property
def AngMom(self): return self.l
def __str__(self):
Lines = []
Lines.append("BasisShell [l = %i, nExp = %i, nCo = %i]" % (self.l, self.nExp, self.nCo))
def FmtA(L):
return ", ".join("%12.5f" % o for o in L)
Lines.append(" Exps = [%s]" % FmtA(self.Exp))
for iCo in range(self.nCo):
Lines.append(" Co[%2i] = [%s]" % (iCo, FmtA(self.Co[:,iCo])))
return "\n".join(Lines)
class FBasisShell1(object):
"""A FBasisShell which is placed on a concrete atom."""
def __init__(self, Atom, ShellFn):
self.Atom = Atom
self.Fn = ShellFn
assert(isinstance(self.Fn, FBasisShell))
@property
def Pos(self):
return self.Atom.Pos
@property
def iAtom(self):
return self.Atom.Index
@property
def l(self): return self.Fn.l
@property
def nExp(self): return self.Fn.nExp
@property
def Exp(self): return self.Fn.Exp
@property
def nCo(self): return self.Fn.nCo
@property
def Co(self): return self.Fn.Co
@property
def nFn(self): return self.Fn.nFn
@property
def nFnCa(self): return self.Fn.nFnCa
class FBasisSet(object):
def __init__(self, Shells, Atoms):
# list of FBasisShell1 objects.
self.Shells = Shells
self.Atoms = Atoms
@property
def nFn(self):
n = 0
for Sh in self.Shells:
n += Sh.nFn
return n
@property
def nFnCa(self):
n = 0
for Sh in self.Shells:
n += Sh.nFnCa
return n
def __str__(self):
Lines = []
for o in self.Shells:
Lines.append("Atom %s %s" % (o.Atom, o.Fn))
return "\n".join(Lines)
def FmtCr(self):
#f = 1./ToAng
f = 1.
Lines = []
def Emit(s):
Lines.append(s)
def EmitArray(Name, A):
#Emit(" " + Name + "<" + " ".join("%.16e"%o for o in A) + ">")
Emit(" " + Name + "<" + " ".join("%r"%o for o in A) + ">")
# collect all unique FBasisShell objects.
BasisFns = []
BasisFnIds = {} # map id(BasisFn)->(index)
for Shell in self.Shells:
if id(Shell.Fn) not in BasisFnIds:
BasisFnIds[id(Shell.Fn)] = len(BasisFns)
BasisFns.append(Shell.Fn)
pass
Emit("Basis<Version<0.1> nFns<%i> nShells<%i>" % (len(BasisFns), len(self.Shells)))
# store the function declarations...
def EmitBasisFn(Fn):
Emit(" Fn<Id<%i> l<%i> nExp<%i> nCo<%i>" % (
BasisFnIds[id(Fn)], Fn.l, Fn.nExp, Fn.nCo))
EmitArray("Exp", Fn.Exp)
for Co in Fn.Co.T:
EmitArray("Co", Co)
Emit(" >")
pass
for Fn in BasisFns:
EmitBasisFn(Fn)
# ...and their distribution amongst atoms.
def EmitShell(Sh):
#Emit(" Shell<iAt<%i> x<%.16e> y<%.16e> z<%.16e> FnId<%i>>" % (
Emit(" Shell<iAt<%i> x<%r> y<%r> z<%r> FnId<%i>>" % (
Sh.Atom.Index, f*Sh.Atom.Pos[0], f*Sh.Atom.Pos[1], f*Sh.Atom.Pos[2], BasisFnIds[id(Sh.Fn)]))
pass
for Shell in self.Shells:
EmitShell(Shell)
Emit(">") # end of Basis
return "\n".join(Lines)
def GetAngmomList(self):
# list of all basis function angular momenta in the basis, for converting basis function orders and types.
ls = []
for Shell in self.Shells:
for iCo in range(Shell.nCo):
ls.append(Shell.l)
return ls
class FIntegralContext(object):
"""contains data describing how to evaluate quantum chemistry matrix
elements on electronic system as defined by the given atoms and basis
sets.
Note: Basis sets must either be basis set names (i.e., library names)
or FBasisSet objects.
"""
def __init__(self, Atoms, OrbBasis, FitBasis=None, BasisLibs=None):
self.Atoms = Atoms
self.OrbBasis = OrbBasis
self.FitBasis = FitBasis
self.BasisLibs = BasisLibs
def _InvokeBfint(self, Args, Outputs=None, Inputs=None, MoreBases=None):
Bases = {}
if self.OrbBasis: Bases['--basis-orb'] = self.OrbBasis
if self.FitBasis: Bases['--basis-fit'] = self.FitBasis
if MoreBases:
Bases = dict(list(Bases.items()) + list(MoreBases.items()))
return _InvokeBfint(self.Atoms, Bases, self.BasisLibs, Args, Outputs, Inputs)
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
"""Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Outputs = []
Outputs.append(("--save-coreh", "INT1E"))
Outputs.append(("--save-fint2e", "INT2E"))
Outputs.append(("--save-overlap", "OVERLAP"))
CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)
nOrb = CoreH.shape[0]
Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
CoreEnergy = self.Atoms.fCoreRepulsion()
if MakeS:
return CoreEnergy, CoreH, Int2e, Overlap
else:
return CoreEnergy, CoreH, Int2e
def MakeOverlaps2(self, OrbBasis2):
"""calculate overlap between current basis and a second basis, as
described in OrbBasis2. Returns <1|2> and <2|2> matrices."""
Args = []
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs = []
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
#Outputs.append(("--save-overlap", "OVERLAP"))
Overlap2, Overlap12 = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap2, Overlap12
def MakeOverlap(self, OrbBasis2=None):
"""calculate overlap within main orbital basis, and, optionally, between main
orbital basis and a second basis, as described in OrbBasis2.
Returns <1|1>, <1|2>, and <2|2> matrices."""
Args = []
Outputs = []
Outputs.append(("--save-overlap", "OVERLAP_1"))
if OrbBasis2 is not None:
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
else:
MoreBases = None
Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap
def MakeNuclearAttractionIntegrals(self, Smh=True):
"""calculate nuclear attraction integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-vnucN", "VNUC_N"))
VNucN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(VNucN.shape[0]**.5 + .5)
assert(nOrb**2 == VNucN.shape[0])
assert(VNucN.shape[1] == len(self.Atoms))
return VNucN.reshape(nOrb, nOrb, VNucN.shape[1])
def MakeNuclearSqDistanceIntegrals(self, Smh=True):
"""calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-rsqN", "RSQ_N"))
RsqN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(RsqN.shape[0]**.5 + .5)
assert(nOrb**2 == RsqN.shape[0])
assert(RsqN.shape[1] == len(self.Atoms))
return RsqN.reshape(nOrb, nOrb, RsqN.shape[1])
def MakeKineticIntegrals(self, Smh=True):
"""calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-kinetic", "EKIN"))
Op = self._InvokeBfint(Args, Outputs)[0]
return Op
def MakeDipoleIntegrals(self, Smh=True):
r"""calculate dipole operator matrices <\mu|w|\nu> (w=x,y,z) in
main basis, for each direction. Returns nAo x nAo x 3 array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-dipole", "DIPN"))
DipN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(DipN.shape[0]**.5 + .5)
assert(nOrb**2 == DipN.shape[0])
assert(DipN.shape[1] == 3)
return DipN.reshape(nOrb, nOrb, 3)
def MakeOrbitalsOnGrid(self, Orbitals, Grid, DerivativeOrder=0):
"""calculate values of molecular orbitals on a grid of 3d points in space.
Input:
- Orbitals: nAo x nOrb matrix, where nAo must be compatible with
self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).
- Grid: 3 x nGrid array giving the coordinates of the grid points.
- DerivativeOrder: 0: only orbital values,
1: orbital values and 1st derivatives,
2: orbital values and up to 2nd derivatives.
Returns:
- nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the
DerivComp dimension is omitted.
"""
Args = [("--eval-orbitals-dx=%s" % DerivativeOrder)]
Inputs = [("--eval-orbitals", "ORBITALS.npy", Orbitals)]\
+ [("--grid-coords", "GRID.npy", Grid)]
Outputs = [("--save-grid-values", "ORBS_ON_GRID")]
(ValuesOnGrid,) = self._InvokeBfint(Args, Outputs, Inputs)
nComp = [1,4,10][DerivativeOrder]
if nComp != 1:
ValuesOnGrid = ValuesOnGrid.reshape((Grid.shape[1], nComp, Orbitals.shape[1]))
return ValuesOnGrid
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
"""compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
where the fitting metric is *not* absorbed into the 2e integrals."""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Args.append("--kernel2e='%s'" % Kernel2e)
Args.append("--solve-fitting-eq=false")
Outputs = []
Outputs.append(("--save-fint2e", "INT2E_3IX"))
Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))
Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)
nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
assert(nOrb**2 == Int2e_Frs.shape[1])
Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
return Int2e_FG, Int2e_Frs
def _InvokeBfint(Atoms, Bases, BasisLibs, BaseArgs, Outputs, Inputs=None):
"""Outputs: an array of tuples (cmdline-arguments,filename-base).
We will generate arguments for each of them and try to read the
corresponding files as numpy arrays and return them in order."""
from tempfile import mkdtemp
from shutil import rmtree
#from commands import getstatusoutput
from subprocess import check_output, CalledProcessError
# make a directory to store our input/output in.
BasePath = mkdtemp(prefix="wmme.", dir=_TmpDir)
def Cleanup():
rmtree(BasePath)
pass
BfIntDir = _WmmeDir
if BfIntDir is None: BfIntDir = GetModulePath()
BasisLibDir = _BasisLibDir
if BasisLibDir is None:
BasisLibDir = path.join(BfIntDir,"bases")
MakeIntegralsExecutable = path.join(BfIntDir,"wmme")
# assemble arguments to integral generation program
FileNameXyz = path.join(BasePath, "ATOMS")
Args = [o for o in BaseArgs]
Args.append("--matrix-format=npy")
for BasisLib in BasisLibs:
Args.append("--basis-lib=%s" % path.join(BasisLibDir, BasisLib))
Args.append("--atoms-au=%s" % FileNameXyz)
iWrittenBasis = 0
for (ParamName, BasisObj) in Bases.items():
if BasisObj is None:
continue
if isinstance(BasisObj, FBasisSet):
# basis is given as an explicit FBasisSet object.
# Write the basis set to disk and supply the file name as argument
BasisFile = path.join(BasePath, "BASIS%i" % iWrittenBasis)
iWrittenBasis += 1
with open(BasisFile, "w") as File:
File.write(BasisObj.FmtCr())
Args.append("%s='!%s'" % (ParamName, BasisFile))
else:
assert(isinstance(BasisObj, str))
# it's just a basis set name: append the name to the arguments.
# (set will be read from library by wmme itself)
Args.append("%s=%s" % (ParamName, BasisObj))
pass
# make file names and arguments for output arrays
FileNameOutputs = []
for (ArgName,FileNameBase) in Outputs:
FileName = path.join(BasePath, FileNameBase)
FileNameOutputs.append(FileName)
Args.append("%s='%s'" % (ArgName, FileName))
XyzLines = "%i\n\n%s\n" % (len(Atoms), Atoms.MakeXyz("%24.16f"))
   # ^- note on the .16f: it actually does make a difference. I had .8f
   # there before, and it led to energy changes on the order of 1e-8
   # when treating only the non-redundant subsystem of a symmetric
   # arrangement.
try:
with open(FileNameXyz, "w") as File:
File.write(XyzLines)
# save input arrays if provided.
if Inputs:
for (ArgName,FileNameBase,Array) in Inputs:
FileName = path.join(BasePath, FileNameBase)
np.save(FileName,Array)
Args.append("%s='%s'" % (ArgName, FileName))
Cmd = "%s %s" % (MakeIntegralsExecutable, " ".join(Args))
#print("!Invoking %s\n" % Cmd)
#iErr, Output = getstatusoutput(Cmd)
#if ( iErr != 0 ):
try:
Output = check_output(Cmd, shell=True)
         if version_info >= (3, 0):
# it returns a byte string in Python 3... which wouldn't be a problem
# if not all OTHER literals were converted to unicode implicitly.
Output = Output.decode("utf-8")
except CalledProcessError as e:
raise Exception("Integral calculation failed. Output was:\n%s\nException was: %s" % (e.output, str(e)))
OutputArrays = []
for FileName in FileNameOutputs:
OutputArrays.append(np.load(FileName))
except:
Cleanup()
raise
# got everything we need. Delete the temporary directory.
Cleanup()
return tuple(OutputArrays)
def ReadXyzFile(FileName,Scale=1./ToAng):
Text = open(FileName,"r").read()
Lines = Text.splitlines()
# allowed formats: <nAtoms> \n Desc \n <atom-list>
# or: <atom-list> (without any headers)
# in the first case, only the first nAtoms+2 lines are read, in the
# second case everything which does not look like a xyz line is
# ignored.
   nAtoms = None
   r = 0, len(Lines)
   if ( len(Lines[0].split()) == 1 ):
      nAtoms = int(Lines[0].split()[0])
      r = 2, nAtoms+2
   Atoms = []
   Xyz = []
   for Line in Lines[r[0]:r[1]]:
ls = Line.split()
try:
Atom = ls[0]
x,y,z = float(ls[1]), float(ls[2]), float(ls[3])
except:
continue
Atom = Atom[0].upper() + Atom[1:].lower()
# maybe we should allow for (and ignore) group numbers after the
# elements?
if Atom not in ElementNames:
raise Exception("while reading '%s': unrecognized element '%s'." % (FileName,Atom))
Atoms.append(Atom)
Xyz.append((x,y,z))
Xyz = Scale*array(Xyz).T
if 0:
print("*read '%s':\n%s" % (FileName, str(FAtomSet(Xyz, Atoms))))
return Xyz, Atoms
def ReadAtomsFromXyzFile(FileName, Scale=1./ToAng):
Xyz,Elements = ReadXyzFile(FileName, Scale)
return FAtomSet(Xyz, Elements)
|
def __init__(self, Positions, Elements, Orientations=None, Name=None):
"""Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
      each atom there is an orthogonal 3x3 matrix denoting the ex,ey,ez
directions."""
self.Pos = Positions
assert(self.Pos.shape[0] == 3 and self.Pos.shape[1] == len(Elements))
self.Elements = Elements
self.Orientations = Orientations
self.Name = Name
| 95
| 106
|
# TODO: By PySCF-1.5 release
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. code style
# * Indent: 3 -> 4
# * Constant should be all uppercase
# * Function/method should be all lowercase
# * Line wrap around 80 columns
# * Use either double quote or single quote, not mix
#
# 2. Conventions required by PySCF
# * Use PYSCF_TMPDIR to replace _TmpDir
#
# 3. Use proper functions provided by PySCF
#
# This file is adapted with permission from the wmme program of Gerald Knizia.
# See http://sites.psu.edu/knizia/software/
#====================================================
from __future__ import print_function
import numpy as np
from numpy import dot, array
from os import path
from sys import version_info
def GetModulePath():
# (hopefully) return the path of the .py file.
# idea is to leave wmme.py in the same directory as the wmme executable,
   # and invoke the scripts using it via, for example,
# PYTHONPATH=$HOME/dev/wmme:$PYTHONPATH python myscriptfile.py
import inspect
return path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))
if 0:
# set executable/basis library directory explicitly.
_WmmeDir = "/home/cgk/dev/wmme"
else:
# set executable/basis library from path of wmme.py
_WmmeDir = None
_TmpDir = None # if None: use operating system default
_BasisLibDir = None # if None: same as _WmmeDir/bases
#ToAng = 0.5291772108
ToAng = 0.529177209 # molpro default.
def ElementNameDummy():
ElementNames = "X H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn".split()
ElementNumbers = dict([(o,i) for (i,o) in enumerate(ElementNames)])
return ElementNames, ElementNumbers
ElementNames, ElementNumbers = ElementNameDummy()
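# e.g. ElementNumbers['H'] == 1 and ElementNumbers['C'] == 6; index 0 is the
# dummy center 'X'.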
def mdot(*args):
"""chained matrix product: mdot(A,B,C,..) = A*B*C*...
No attempt is made to optimize the contraction order."""
r = args[0]
for a in args[1:]:
r = dot(r,a)
return r
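# A tiny guarded usage sketch for mdot (illustrative values only):
if 0:
   A = np.eye(2); B = 2.*np.eye(2)
   assert np.allclose(mdot(A, B, A), 2.*np.eye(2))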
def dot2(A,B): return dot(A.flatten(),B.flatten())
def nCartY(l):
   # number of Cartesian components for angular momentum l (e.g., 6 for l=2)
   return ((l+1)*(l+2))//2
class FAtom(object):
def __init__(self, Element, Position, Index):
self.Element = Element
self.Pos = Position
self.Index = Index
@property
def Label(self):
# return element and center index combined.
return "%2s%3s"%(self.Element,1 + self.Index)
@property
def iElement(self):
return ElementNumbers[self.Element]
def __str__(self):
return "%s (%6.3f,%6.3f,%6.3f)"%(self.Label, self.Pos[0], self.Pos[1], self.Pos[2])
class FAtomSet(object):
def __init__(self, Positions, Elements, Orientations=None, Name=None):
"""Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
      each atom there is an orthogonal 3x3 matrix denoting the ex,ey,ez
directions."""
self.Pos = Positions
assert(self.Pos.shape[0] == 3 and self.Pos.shape[1] == len(Elements))
self.Elements = Elements
self.Orientations = Orientations
self.Name = Name
def MakeXyz(self,NumFmt = "%15.8f",Scale=1.):
Lines = []
for i in range(len(self.Elements)):
Lines.append(" %5s {0} {0} {0}".format(NumFmt) % (\
self.Elements[i], Scale*self.Pos[0,i], Scale*self.Pos[1,i], Scale*self.Pos[2,i]))
return "\n".join(Lines)
def nElecNeutral(self):
"""return number of electrons present in the total system if neutral."""
return sum([ElementNumbers[o] for o in self.Elements])
def fCoreRepulsion1(self, iAt, jAt):
      if iAt == jAt: return 0. # <- a core doesn't repel itself.
ChA, ChB = [ElementNumbers[self.Elements[o]] for o in [iAt, jAt]]
return ChA * ChB / np.sum((self.Pos[:,iAt] - self.Pos[:,jAt])**2)**.5
def fCoreRepulsion(self):
N = len(self.Elements)
Charges = array([ElementNumbers[o] for o in self.Elements])
fCoreEnergy = 0
for i in range(N):
for j in range(i):
fCoreEnergy += self.fCoreRepulsion1(i,j)
#fCoreEnergy += Charges[i] * Charges[j] / np.sum((self.Pos[:,i] - self.Pos[:,j])**2)**.5
return fCoreEnergy
def __str__(self):
Caption = " %5s%15s %15s %15s" % ("ATOM", "POS/X", "POS/Y", "POS/Z")
return Caption + "\n" + self.MakeXyz()
def __len__(self): return len(self.Elements)
def __getitem__(self,key): return FAtom(self.Elements[key], self.Pos[:,key], key)
def __iter__(self):
for (iAt,(Type,Xyz)) in enumerate(zip(self.Elements, self.Pos.T)):
#yield (Type,Xyz)
yield FAtom(Type, Xyz, iAt)
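# Guarded construction sketch for FAtomSet (coordinates in Bohr; the values
# here are made up for illustration):
if 0:
   Pos = array([[0.,0.],[0.,0.],[0.,1.4]])   # 3 x nAtom
   print(FAtomSet(Pos, ["H","H"]))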
class FBasisShell(object):
"""A generally contracted shell of spherical harmonic basis functions."""
def __init__(self, l, Exp, Co):
self.l = l
assert(isinstance(l,int) and l >= 0 and l <= 8)
self.Exp = np.array(Exp)
assert(self.Exp.ndim == 1)
self.Co = np.array(Co)
assert(self.Co.ndim == 2 and self.Co.shape[0] == len(self.Exp))
self.Element = None # designated element for the basis function
self.Comment = None # comment on the basis function (e.g., literature reference)
@property
def nExp(self):
return len(self.Exp)
@property
def nCo(self):
return self.Co.shape[1]
@property
def nFn(self):
return self.nCo * (2*self.l + 1)
@property
def nFnCa(self):
return self.nCo * nCartY(self.l)
@property
def AngMom(self): return self.l
def __str__(self):
Lines = []
Lines.append("BasisShell [l = %i, nExp = %i, nCo = %i]" % (self.l, self.nExp, self.nCo))
def FmtA(L):
return ", ".join("%12.5f" % o for o in L)
Lines.append(" Exps = [%s]" % FmtA(self.Exp))
for iCo in range(self.nCo):
Lines.append(" Co[%2i] = [%s]" % (iCo, FmtA(self.Co[:,iCo])))
return "\n".join(Lines)
class FBasisShell1(object):
"""A FBasisShell which is placed on a concrete atom."""
def __init__(self, Atom, ShellFn):
self.Atom = Atom
self.Fn = ShellFn
assert(isinstance(self.Fn, FBasisShell))
@property
def Pos(self):
return self.Atom.Pos
@property
def iAtom(self):
return self.Atom.Index
@property
def l(self): return self.Fn.l
@property
def nExp(self): return self.Fn.nExp
@property
def Exp(self): return self.Fn.Exp
@property
def nCo(self): return self.Fn.nCo
@property
def Co(self): return self.Fn.Co
@property
def nFn(self): return self.Fn.nFn
@property
def nFnCa(self): return self.Fn.nFnCa
class FBasisSet(object):
def __init__(self, Shells, Atoms):
# list of FBasisShell1 objects.
self.Shells = Shells
self.Atoms = Atoms
@property
def nFn(self):
n = 0
for Sh in self.Shells:
n += Sh.nFn
return n
@property
def nFnCa(self):
n = 0
for Sh in self.Shells:
n += Sh.nFnCa
return n
def __str__(self):
Lines = []
for o in self.Shells:
Lines.append("Atom %s %s" % (o.Atom, o.Fn))
return "\n".join(Lines)
def FmtCr(self):
#f = 1./ToAng
f = 1.
Lines = []
def Emit(s):
Lines.append(s)
def EmitArray(Name, A):
#Emit(" " + Name + "<" + " ".join("%.16e"%o for o in A) + ">")
Emit(" " + Name + "<" + " ".join("%r"%o for o in A) + ">")
# collect all unique FBasisShell objects.
BasisFns = []
BasisFnIds = {} # map id(BasisFn)->(index)
for Shell in self.Shells:
if id(Shell.Fn) not in BasisFnIds:
BasisFnIds[id(Shell.Fn)] = len(BasisFns)
BasisFns.append(Shell.Fn)
pass
Emit("Basis<Version<0.1> nFns<%i> nShells<%i>" % (len(BasisFns), len(self.Shells)))
# store the function declarations...
def EmitBasisFn(Fn):
Emit(" Fn<Id<%i> l<%i> nExp<%i> nCo<%i>" % (
BasisFnIds[id(Fn)], Fn.l, Fn.nExp, Fn.nCo))
EmitArray("Exp", Fn.Exp)
for Co in Fn.Co.T:
EmitArray("Co", Co)
Emit(" >")
pass
for Fn in BasisFns:
EmitBasisFn(Fn)
# ...and their distribution amongst atoms.
def EmitShell(Sh):
#Emit(" Shell<iAt<%i> x<%.16e> y<%.16e> z<%.16e> FnId<%i>>" % (
Emit(" Shell<iAt<%i> x<%r> y<%r> z<%r> FnId<%i>>" % (
Sh.Atom.Index, f*Sh.Atom.Pos[0], f*Sh.Atom.Pos[1], f*Sh.Atom.Pos[2], BasisFnIds[id(Sh.Fn)]))
pass
for Shell in self.Shells:
EmitShell(Shell)
Emit(">") # end of Basis
return "\n".join(Lines)
def GetAngmomList(self):
# list of all basis function angular momenta in the basis, for converting basis function orders and types.
ls = []
for Shell in self.Shells:
for iCo in range(Shell.nCo):
ls.append(Shell.l)
return ls
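# FmtCr() serializes the basis in wmme's plain-text declaration format,
# schematically (indentation and numeric formatting abbreviated):
#   Basis<Version<0.1> nFns<1> nShells<1>
#    Fn<Id<0> l<0> nExp<2> nCo<1>
#     Exp<13.01 1.962>
#     Co<0.0197 0.138>
#    >
#    Shell<iAt<0> x<0.0> y<0.0> z<0.0> FnId<0>>
#   >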
class FIntegralContext(object):
"""contains data describing how to evaluate quantum chemistry matrix
elements on electronic system as defined by the given atoms and basis
sets.
Note: Basis sets must either be basis set names (i.e., library names)
or FBasisSet objects.
"""
def __init__(self, Atoms, OrbBasis, FitBasis=None, BasisLibs=None):
self.Atoms = Atoms
self.OrbBasis = OrbBasis
self.FitBasis = FitBasis
self.BasisLibs = BasisLibs
def _InvokeBfint(self, Args, Outputs=None, Inputs=None, MoreBases=None):
Bases = {}
if self.OrbBasis: Bases['--basis-orb'] = self.OrbBasis
if self.FitBasis: Bases['--basis-fit'] = self.FitBasis
if MoreBases:
Bases = dict(list(Bases.items()) + list(MoreBases.items()))
return _InvokeBfint(self.Atoms, Bases, self.BasisLibs, Args, Outputs, Inputs)
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
"""Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Outputs = []
Outputs.append(("--save-coreh", "INT1E"))
Outputs.append(("--save-fint2e", "INT2E"))
Outputs.append(("--save-overlap", "OVERLAP"))
CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)
nOrb = CoreH.shape[0]
Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
CoreEnergy = self.Atoms.fCoreRepulsion()
if MakeS:
return CoreEnergy, CoreH, Int2e, Overlap
else:
return CoreEnergy, CoreH, Int2e
def MakeOverlaps2(self, OrbBasis2):
"""calculate overlap between current basis and a second basis, as
described in OrbBasis2. Returns <1|2> and <2|2> matrices."""
Args = []
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs = []
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
#Outputs.append(("--save-overlap", "OVERLAP"))
Overlap2, Overlap12 = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap2, Overlap12
def MakeOverlap(self, OrbBasis2=None):
"""calculate overlap within main orbital basis, and, optionally, between main
orbital basis and a second basis, as described in OrbBasis2.
Returns <1|1>, <1|2>, and <2|2> matrices."""
Args = []
Outputs = []
Outputs.append(("--save-overlap", "OVERLAP_1"))
if OrbBasis2 is not None:
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
else:
MoreBases = None
Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap
def MakeNuclearAttractionIntegrals(self, Smh=True):
"""calculate nuclear attraction integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-vnucN", "VNUC_N"))
VNucN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(VNucN.shape[0]**.5 + .5)
assert(nOrb**2 == VNucN.shape[0])
assert(VNucN.shape[1] == len(self.Atoms))
return VNucN.reshape(nOrb, nOrb, VNucN.shape[1])
def MakeNuclearSqDistanceIntegrals(self, Smh=True):
"""calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-rsqN", "RSQ_N"))
RsqN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(RsqN.shape[0]**.5 + .5)
assert(nOrb**2 == RsqN.shape[0])
assert(RsqN.shape[1] == len(self.Atoms))
return RsqN.reshape(nOrb, nOrb, RsqN.shape[1])
def MakeKineticIntegrals(self, Smh=True):
"""calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-kinetic", "EKIN"))
Op = self._InvokeBfint(Args, Outputs)[0]
return Op
def MakeDipoleIntegrals(self, Smh=True):
r"""calculate dipole operator matrices <\mu|w|\nu> (w=x,y,z) in
main basis, for each direction. Returns nAo x nAo x 3 array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-dipole", "DIPN"))
DipN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(DipN.shape[0]**.5 + .5)
assert(nOrb**2 == DipN.shape[0])
assert(DipN.shape[1] == 3)
return DipN.reshape(nOrb, nOrb, 3)
def MakeOrbitalsOnGrid(self, Orbitals, Grid, DerivativeOrder=0):
"""calculate values of molecular orbitals on a grid of 3d points in space.
Input:
- Orbitals: nAo x nOrb matrix, where nAo must be compatible with
self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).
- Grid: 3 x nGrid array giving the coordinates of the grid points.
- DerivativeOrder: 0: only orbital values,
1: orbital values and 1st derivatives,
2: orbital values and up to 2nd derivatives.
Returns:
- nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the
DerivComp dimension is omitted.
"""
Args = [("--eval-orbitals-dx=%s" % DerivativeOrder)]
Inputs = [("--eval-orbitals", "ORBITALS.npy", Orbitals)]\
+ [("--grid-coords", "GRID.npy", Grid)]
Outputs = [("--save-grid-values", "ORBS_ON_GRID")]
(ValuesOnGrid,) = self._InvokeBfint(Args, Outputs, Inputs)
nComp = [1,4,10][DerivativeOrder]
if nComp != 1:
ValuesOnGrid = ValuesOnGrid.reshape((Grid.shape[1], nComp, Orbitals.shape[1]))
return ValuesOnGrid
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
"""compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
where the fitting metric is *not* absorbed into the 2e integrals."""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Args.append("--kernel2e='%s'" % Kernel2e)
Args.append("--solve-fitting-eq=false")
Outputs = []
Outputs.append(("--save-fint2e", "INT2E_3IX"))
Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))
Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)
nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
assert(nOrb**2 == Int2e_Frs.shape[1])
Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
return Int2e_FG, Int2e_Frs
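# Guarded usage sketch for FIntegralContext.MakeOrbitalsOnGrid; 'ic' and
# 'Orbs' are hypothetical (an existing context and an nAo x nOrb coefficient
# matrix in the contravariant AO basis):
if 0:
   GridXyz = np.zeros((3,5)); GridXyz[2,:] = np.linspace(-2.,2.,5)
   Vals = ic.MakeOrbitalsOnGrid(Orbs, GridXyz, DerivativeOrder=1)
   # Vals.shape == (5, 4, Orbs.shape[1]): value plus x/y/z derivatives per point.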
def _InvokeBfint(Atoms, Bases, BasisLibs, BaseArgs, Outputs, Inputs=None):
"""Outputs: an array of tuples (cmdline-arguments,filename-base).
We will generate arguments for each of them and try to read the
corresponding files as numpy arrays and return them in order."""
from tempfile import mkdtemp
from shutil import rmtree
#from commands import getstatusoutput
from subprocess import check_output, CalledProcessError
# make a directory to store our input/output in.
BasePath = mkdtemp(prefix="wmme.", dir=_TmpDir)
def Cleanup():
rmtree(BasePath)
pass
BfIntDir = _WmmeDir
if BfIntDir is None: BfIntDir = GetModulePath()
BasisLibDir = _BasisLibDir
if BasisLibDir is None:
BasisLibDir = path.join(BfIntDir,"bases")
MakeIntegralsExecutable = path.join(BfIntDir,"wmme")
# assemble arguments to integral generation program
FileNameXyz = path.join(BasePath, "ATOMS")
Args = [o for o in BaseArgs]
Args.append("--matrix-format=npy")
for BasisLib in BasisLibs:
Args.append("--basis-lib=%s" % path.join(BasisLibDir, BasisLib))
Args.append("--atoms-au=%s" % FileNameXyz)
iWrittenBasis = 0
for (ParamName, BasisObj) in Bases.items():
if BasisObj is None:
continue
if isinstance(BasisObj, FBasisSet):
# basis is given as an explicit FBasisSet object.
# Write the basis set to disk and supply the file name as argument
BasisFile = path.join(BasePath, "BASIS%i" % iWrittenBasis)
iWrittenBasis += 1
with open(BasisFile, "w") as File:
File.write(BasisObj.FmtCr())
Args.append("%s='!%s'" % (ParamName, BasisFile))
else:
assert(isinstance(BasisObj, str))
# it's just a basis set name: append the name to the arguments.
# (set will be read from library by wmme itself)
Args.append("%s=%s" % (ParamName, BasisObj))
pass
# make file names and arguments for output arrays
FileNameOutputs = []
for (ArgName,FileNameBase) in Outputs:
FileName = path.join(BasePath, FileNameBase)
FileNameOutputs.append(FileName)
Args.append("%s='%s'" % (ArgName, FileName))
XyzLines = "%i\n\n%s\n" % (len(Atoms), Atoms.MakeXyz("%24.16f"))
   # ^- note on the .16f: it actually does make a difference. I had .8f
   # there before, and it led to energy changes on the order of 1e-8
   # when treating only the non-redundant subsystem of a symmetric
   # arrangement.
try:
with open(FileNameXyz, "w") as File:
File.write(XyzLines)
# save input arrays if provided.
if Inputs:
for (ArgName,FileNameBase,Array) in Inputs:
FileName = path.join(BasePath, FileNameBase)
np.save(FileName,Array)
Args.append("%s='%s'" % (ArgName, FileName))
Cmd = "%s %s" % (MakeIntegralsExecutable, " ".join(Args))
#print("!Invoking %s\n" % Cmd)
#iErr, Output = getstatusoutput(Cmd)
#if ( iErr != 0 ):
try:
Output = check_output(Cmd, shell=True)
         if version_info >= (3, 0):
# it returns a byte string in Python 3... which wouldn't be a problem
# if not all OTHER literals were converted to unicode implicitly.
Output = Output.decode("utf-8")
except CalledProcessError as e:
raise Exception("Integral calculation failed. Output was:\n%s\nException was: %s" % (e.output, str(e)))
OutputArrays = []
for FileName in FileNameOutputs:
OutputArrays.append(np.load(FileName))
except:
Cleanup()
raise
# got everything we need. Delete the temporary directory.
Cleanup()
return tuple(OutputArrays)
def ReadXyzFile(FileName,Scale=1./ToAng):
Text = open(FileName,"r").read()
Lines = Text.splitlines()
# allowed formats: <nAtoms> \n Desc \n <atom-list>
# or: <atom-list> (without any headers)
# in the first case, only the first nAtoms+2 lines are read, in the
# second case everything which does not look like a xyz line is
# ignored.
   nAtoms = None
   r = 0, len(Lines)
   if ( len(Lines[0].split()) == 1 ):
      nAtoms = int(Lines[0].split()[0])
      r = 2, nAtoms+2
   Atoms = []
   Xyz = []
   for Line in Lines[r[0]:r[1]]:
ls = Line.split()
try:
Atom = ls[0]
x,y,z = float(ls[1]), float(ls[2]), float(ls[3])
except:
continue
Atom = Atom[0].upper() + Atom[1:].lower()
# maybe we should allow for (and ignore) group numbers after the
# elements?
if Atom not in ElementNames:
raise Exception("while reading '%s': unrecognized element '%s'." % (FileName,Atom))
Atoms.append(Atom)
Xyz.append((x,y,z))
Xyz = Scale*array(Xyz).T
if 0:
print("*read '%s':\n%s" % (FileName, str(FAtomSet(Xyz, Atoms))))
return Xyz, Atoms
def ReadAtomsFromXyzFile(FileName, Scale=1./ToAng):
Xyz,Elements = ReadXyzFile(FileName, Scale)
return FAtomSet(Xyz, Elements)
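# A guarded end-to-end sketch; the file name and basis names are hypothetical,
# and a wmme executable is assumed to sit next to this module. An input
# 'h2.xyz' in the headered format would read:
#   2
#   (comment line)
#   H 0.0 0.0 0.0
#   H 0.0 0.0 0.74
if 0:
   Atoms = ReadAtomsFromXyzFile("h2.xyz")   # coordinates scaled Angstrom -> Bohr
   ic = FIntegralContext(Atoms, OrbBasis="def2-SVP", FitBasis="def2-SVP-JKFIT",
                         BasisLibs=["def2.libmol"])
   CoreEnergy, CoreH, Int2e, Overlap = ic.MakeBaseIntegrals(Smh=True, MakeS=True)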
|
etcd_data_change
|
Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently only when the number of etcd
units has actually changed
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.'''
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# clear the flag managers
FlagManager('kube-apiserver').destroy_all()
FlagManager('kube-controller-manager').destroy_all()
FlagManager('kube-scheduler').destroy_all()
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.channel')
def channel_changed():
install_snaps()
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
api_opts = FlagManager('kube-apiserver')
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
api_opts.add('service-account-key-file', service_key)
controller_opts = FlagManager('kube-controller-manager')
controller_opts.add('service-account-private-key-file', service_key)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
    Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = charms.leadership.leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
# MASKED: etcd_data_change function (lines 378-391)
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
"""Send the tokens to the workers."""
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
# Send the data
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token,
proxy_token, admin_token)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
    Test if the addons got installed.
    Returns: True if the addons got applied.
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
    ''' Determine whether we should remove the state that controls the
    re-render and execution of the ceph-relation-changed event: if the
    relation data has changed, we should re-render any configs, keys,
    and/or service pre-reqs. '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
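# A guarded usage sketch (hypothetical paths and password; mirrors what
# build_kubeconfig does for the admin user):
if False:
    create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443',
                      '/root/cdk/ca.crt', user='admin', password='secret')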
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .10 at the end of the network
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
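# A guarded sketch of the CIDR arithmetic above (the CIDR value is
# illustrative, not a guaranteed charm default):
if False:
    example = ipaddress.IPv4Interface('10.152.183.0/24')
    print((example.network.network_address + 1).exploded)   # service ip: 10.152.183.1
    print((example.network.network_address + 10).exploded)  # dns ip: 10.152.183.10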
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
    # Never use stale data; always prefer what's coming in during context
    # building. If it's stale, it's because what's in unitdata is stale.
data = api_opts.data
if data.get('etcd-servers-strict') or data.get('etcd-servers'):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True)
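# Note: the connection string is the comma-separated list of etcd client URLs
# supplied by the relation, e.g. 'https://10.0.0.2:2379,https://10.0.0.3:2379'
# (illustrative addresses).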
def configure_master_services():
''' Add remaining flags for the master services and configure snaps to use
them '''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
    # Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
check_call(cmd)
cmd = (
['snap', 'set', 'kube-controller-manager'] +
controller_opts.to_s().split(' ')
)
check_call(cmd)
cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
check_call(cmd)
def setup_basic_auth(password=None, username='admin', uid='admin'):
    '''Create the htaccess file and the tokens.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user))
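# Both csv files share the layout 'secret,name,id'; e.g. a known_tokens.csv
# line 'abc123,kubelet,kubelet' makes get_token('kubelet') return 'abc123'.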
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
|
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently, but only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
| 378
| 391
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.'''
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# clear the flag managers
FlagManager('kube-apiserver').destroy_all()
FlagManager('kube-controller-manager').destroy_all()
FlagManager('kube-scheduler').destroy_all()
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.channel')
def channel_changed():
install_snaps()
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
api_opts = FlagManager('kube-apiserver')
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
api_opts.add('service-account-key-file', service_key)
controller_opts = FlagManager('kube-controller-manager')
controller_opts.add('service-account-private-key-file', service_key)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
overwrite_local: if True, overwrite existing local files with leader data.
Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = charms.leadership.leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
# etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently, but only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
"""Send the tokens to the workers."""
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
# Send the data
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token,
proxy_token, admin_token)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
Test if the add-ons got installed
Returns: True if the add-ons were applied, False otherwise
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except CalledProcessError:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def arch():
'''Return the package architecture (e.g. 'amd64') as a string.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
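# For orientation (not part of the original charm), a call such as
# create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443', '/ca.crt',
# user='admin', password='secret') -- illustrative values -- runs roughly:
#   kubectl config --kubeconfig=/home/ubuntu/config set-cluster juju-cluster \
#       --server=https://10.0.0.1:6443 --certificate-authority=/ca.crt --embed-certs=true
#   kubectl config --kubeconfig=/home/ubuntu/config unset users
#   kubectl config --kubeconfig=/home/ubuntu/config set-credentials admin --username=admin --password=secret
#   kubectl config --kubeconfig=/home/ubuntu/config set-context juju-context --cluster=juju-cluster --user=admin
#   kubectl config --kubeconfig=/home/ubuntu/config use-context juju-context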
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .10 at the end of the network
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
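# Quick sanity check (illustrative, not in the original charm): assuming a
# service CIDR of '10.152.183.0/24' (the real value comes from the Juju
# config), the two helpers above resolve to the .10 and .1 host addresses:
#   import ipaddress
#   net = ipaddress.IPv4Interface('10.152.183.0/24').network
#   str(net.network_address + 10)  # -> '10.152.183.10' (get_dns_ip)
#   str(net.network_address + 1)   # -> '10.152.183.1'  (get_kubernetes_service_ip)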
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
# Never use stale data; always prefer what's coming in during context
# building. If it's stale, it's because what's in unitdata is stale.
data = api_opts.data
if data.get('etcd-servers-strict') or data.get('etcd-servers'):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True)
def configure_master_services():
''' Add remaining flags for the master services and configure snaps to use
them '''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
# Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
check_call(cmd)
cmd = (
['snap', 'set', 'kube-controller-manager'] +
controller_opts.to_s().split(' ')
)
check_call(cmd)
cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
check_call(cmd)
def setup_basic_auth(password=None, username='admin', uid='admin'):
'''Create the htaccess file and the tokens.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
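# Example (hypothetical data): if /root/cdk/known_tokens.csv contains the
# line 'abc123,kubelet,kubelet', then get_password('known_tokens.csv',
# 'kubelet') returns 'abc123', matching the token,username,user layout
# written by setup_tokens() above.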
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token in the unit's key/value store so it can be recalled later.
param: password - the password/token to be stored
param: save_salt - the key under which to store the value.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
|
onehot_encode
|
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
|
# coding: utf-8
# In[ ]:
import os
import re
import tarfile
import requests
from pugnlp.futil import path_status, find_files
# In[ ]:
# From the nlpia package for downloading data too big for the repo
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563, # 3112841312,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
3112841563, # 3112841312,
),
}
# In[ ]:
# These functions are part of the nlpia package which can be pip installed and run from there.
def dropbox_basename(url):
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename
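# For example, dropbox_basename('https://www.dropbox.com/s/x/slang.csv.gz?dl=1')
# returns 'slang.csv.gz' -- the '?dl=N' suffix Dropbox appends is stripped.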
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
"""Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https"""
if filename is None:
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = url[:-1] + '1' # noninteractive download
if verbose:
tqdm_prog = tqdm  # tqdm comes from the nlpia package this was copied from
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm  # likewise an nlpia helper; tqdm_prog is unused in this copy
r = requests.get(url, stream=True, allow_redirects=True)
size = r.headers.get('Content-Length', None) if size is None else size
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if stat['type'] == 'file' and stat['size'] == size: # TODO: check md5 or get the right size of remote file
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive chunks
f.write(chunk)
r.close()
return file_path
def untar(fname):
if fname.endswith("tar.gz"):
with tarfile.open(fname) as tf:
tf.extractall()
else:
print("Not a tar.gz file: {}".format(fname))
# In[ ]:
# UNCOMMENT these 2 lines if you haven't already downloaded the word2vec model and the imdb dataset
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))
# In[ ]:
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
import glob
import os
from random import shuffle
def pre_process_data(filepath):
"""
This is dependent on your training data source, but we will try to generalize it as best we can.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset
# In[ ]:
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
def tokenize_and_vectorize(dataset):
tokenizer = TreebankWordTokenizer()
vectorized_data = []
expected = []
for sample in dataset:
tokens = tokenizer.tokenize(sample[1])
sample_vecs = []
for token in tokens:
try:
sample_vecs.append(word_vectors[token])
except KeyError:
pass # No matching token in the Google w2v vocab
vectorized_data.append(sample_vecs)
return vectorized_data
# In[ ]:
def collect_expected(dataset):
""" Peel of the target values from the dataset """
expected = []
for sample in dataset:
expected.append(sample[0])
return expected
# In[ ]:
def pad_trunc(data, maxlen):
""" For a given dataset pad with zero vectors or truncate to maxlen """
new_data = []
# Create a vector of 0's the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = sample
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
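# Illustrative check (not in the original notebook), using tiny 2-d "word
# vectors" instead of the 300-d ones below: a short sample is padded with
# zero vectors and a long one is truncated, so every sample ends up at maxlen.
#   toy = [[[1., 1.]], [[1., 1.], [2., 2.], [3., 3.]]]
#   [len(s) for s in pad_trunc(toy, 2)]  # -> [2, 2]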
# In[ ]:
import numpy as np
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
maxlen = 400
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model1.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights1.h5")
print('Model saved.')
# In[ ]:
from keras.models import model_from_json
with open("lstm_model1.json", "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights('lstm_weights1.h5')
# In[ ]:
sample_1 = "I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."
# We pass a dummy value in the first element of the tuple just because our helper expects it from the way we processed the initial data. That value won't ever see the network, so it can be anything.
vec_list = tokenize_and_vectorize([(1, sample_1)])
# Tokenize returns a list of the data (length 1 here)
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))
print("Sample's sentiment, 1 - pos, 2 - neg : {}".format(model.predict_classes(test_vec)))
print("Raw output of sigmoid function: {}".format(model.predict(test_vec)))
# In[ ]:
def test_len(data, maxlen):
total_len = truncated = exact = padded = 0
for sample in data:
total_len += len(sample)
if len(sample) > maxlen:
truncated += 1
elif len(sample) < maxlen:
padded += 1
else:
exact += 1
print('Padded: {}'.format(padded))
print('Equal: {}'.format(exact))
print('Truncated: {}'.format(truncated))
print('Avg length: {}'.format(total_len/len(data)))
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
test_len(vectorized_data, 400)
# In[ ]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
maxlen = 200
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model7.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights7.h5")
print('Model saved.')
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
# In[ ]:
def avg_len(data):
total_len = 0
for sample in data:
total_len += len(sample[1])
return total_len / len(data)  # return (rather than print) so the caller's print() doesn't show 'None'
print(avg_len(dataset))
# In[ ]:
def clean_data(data):
""" Shift to lower case, replace unknowns with UNK, and listify """
new_data = []
# Note: this whitelist omits the digit '0', so zeros become 'UNK' below.
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower(): # Just grab the string, not the label
if char in VALID:
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data
listified_data = clean_data(dataset)
# In[ ]:
def char_pad_trunc(data, maxlen):
""" We truncate to maxlen or add in PAD tokens """
new_dataset = []
for sample in data:
if len(sample) > maxlen:
new_data = sample[:maxlen]
elif len(sample) < maxlen:
pads = maxlen - len(sample)
new_data = sample + ['PAD'] * pads
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset
maxlen = 1500
# In[ ]:
def create_dicts(data):
""" Modified from Keras LSTM example"""
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
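# For instance (illustrative), create_dicts([['a', 'b'], ['b', 'c']]) returns
# forward and reverse lookups over the 3-character vocabulary, so that
# indices_char[char_indices[c]] == c for every character seen. The vocabulary
# size, len(char_indices), becomes the one-hot encoding width used below.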
# In[ ]:
import numpy as np
# MASKED: onehot_encode function (lines 486-501)
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
listified_data = clean_data(dataset)
maxlen = 1500
common_length_data = char_pad_trunc(listified_data, maxlen)
char_indices, indices_char = create_dicts(common_length_data)
encoded_data = onehot_encode(common_length_data, char_indices, maxlen)
# In[ ]:
split_point = int(len(encoded_data)*.8)
x_train = encoded_data[:split_point]
y_train = expected[:split_point]
x_test = encoded_data[split_point:]
y_test = expected[split_point:]
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, LSTM
num_neurons = 40
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
# In[ ]:
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("char_lstm_model3.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("char_lstm_weights3.h5")
print('Model saved.')
# In[ ]:
from nltk.corpus import gutenberg
print(gutenberg.fileids())
# In[ ]:
text = ''
for txt in gutenberg.fileids():
if 'shakespeare' in txt:
text += gutenberg.raw(txt).lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# In[ ]:
print(text[:500])
# In[ ]:
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
# In[ ]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)  # np.bool is deprecated in newer NumPy; builtin bool is equivalent
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
# In[ ]:
epochs = 6
batch_size = 128
model_structure = model.to_json()
with open("shakes_lstm_model.json", "w") as json_file:
json_file.write(model_structure)
for i in range(5):
model.fit(X, y,
batch_size=batch_size,
epochs=epochs)
model.save_weights("shakes_lstm_weights_{}.h5".format(i+1))
print('Model saved.')
# In[ ]:
### NOT IN CHAPTER, Just to reproduce output
from keras.models import model_from_json
with open('shakes_lstm_model.json', 'r') as f:
model_json = f.read()
model = model_from_json(model_json)
model.load_weights('shakes_lstm_weights_4.h5')
# In[ ]:
import random
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
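# Rough intuition (not in the original notebook): temperature rescales the
# log-probabilities before re-normalizing. For preds = [0.1, 0.2, 0.7]:
#   temperature=0.2 -> ~[0.000, 0.002, 0.998]  (nearly always picks index 2)
#   temperature=1.0 ->  [0.1, 0.2, 0.7]        (unchanged)
#   temperature=2.0 -> ~[0.198, 0.279, 0.523]  (much flatter, more surprises)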
# In[ ]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
# In[ ]:
from keras.models import Sequential
from keras.layers import GRU
model = Sequential()
model.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))
# In[ ]:
from keras.models import Sequential
from keras.layers import LSTM
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
model.add(LSTM(num_neurons_2, return_sequences=True))  # num_neurons_2 is left for the reader to define in this illustrative snippet
|
def onehot_encode(dataset, char_indices, maxlen):
"""
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
"""
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for i, sentence in enumerate(dataset):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
return X
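# A minimal usage sketch (illustrative toy data): two 3-character samples over
# a 3-character vocabulary yield a (2, 3, 3) array of one-hot rows.
#   toy_indices = {'a': 0, 'b': 1, 'c': 2}
#   onehot_encode([list('abc'), list('cab')], toy_indices, maxlen=3).shape
#   # -> (2, 3, 3)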
| 486
| 501
|
# coding: utf-8
# In[ ]:
import os
import re
import tarfile
import requests
from pugnlp.futil import path_status, find_files
# In[ ]:
# From the nlpia package for downloading data too big for the repo
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563, # 3112841312,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
3112841563, # 3112841312,
),
}
# In[ ]:
# These functions are part of the nlpia package which can be pip installed and run from there.
def dropbox_basename(url):
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
"""Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https"""
if filename is None:
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = url[:-1] + '1' # noninteractive download
if verbose:
tqdm_prog = tqdm  # tqdm comes from the nlpia package this was copied from
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm  # likewise an nlpia helper; tqdm_prog is unused in this copy
r = requests.get(url, stream=True, allow_redirects=True)
size = r.headers.get('Content-Length', None) if size is None else size
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if stat['type'] == 'file' and stat['size'] == size: # TODO: check md5 or get the right size of remote file
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive chunks
f.write(chunk)
r.close()
return file_path
def untar(fname):
if fname.endswith("tar.gz"):
with tarfile.open(fname) as tf:
tf.extractall()
else:
print("Not a tar.gz file: {}".format(fname))
# In[ ]:
# UNCOMMENT these 2 lines if you haven't already downloaded the word2vec model and the imdb dataset
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))
# In[ ]:
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
import glob
import os
from random import shuffle
def pre_process_data(filepath):
"""
This is dependent on your training data source, but we will try to generalize it as best we can.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset
# In[ ]:
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
def tokenize_and_vectorize(dataset):
tokenizer = TreebankWordTokenizer()
vectorized_data = []
expected = []
for sample in dataset:
tokens = tokenizer.tokenize(sample[1])
sample_vecs = []
for token in tokens:
try:
sample_vecs.append(word_vectors[token])
except KeyError:
pass # No matching token in the Google w2v vocab
vectorized_data.append(sample_vecs)
return vectorized_data
# In[ ]:
def collect_expected(dataset):
""" Peel of the target values from the dataset """
expected = []
for sample in dataset:
expected.append(sample[0])
return expected
# In[ ]:
def pad_trunc(data, maxlen):
""" For a given dataset pad with zero vectors or truncate to maxlen """
new_data = []
# Create a vector of 0's the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = sample
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
# In[ ]:
import numpy as np
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
maxlen = 400
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model1.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights1.h5")
print('Model saved.')
# In[ ]:
from keras.models import model_from_json
with open("lstm_model1.json", "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights('lstm_weights1.h5')
# In[ ]:
sample_1 = "I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."
# We pass a dummy value in the first element of the tuple just because our helper expects it from the way processed the initial data. That value won't ever see the network, so it can be whatever.
vec_list = tokenize_and_vectorize([(1, sample_1)])
# Tokenize returns a list of the data (length 1 here)
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))
print("Sample's sentiment, 1 - pos, 2 - neg : {}".format(model.predict_classes(test_vec)))
print("Raw output of sigmoid function: {}".format(model.predict(test_vec)))
# In[ ]:
def test_len(data, maxlen):
total_len = truncated = exact = padded = 0
for sample in data:
total_len += len(sample)
if len(sample) > maxlen:
truncated += 1
elif len(sample) < maxlen:
padded += 1
else:
            exact += 1
print('Padded: {}'.format(padded))
print('Equal: {}'.format(exact))
print('Truncated: {}'.format(truncated))
print('Avg length: {}'.format(total_len/len(data)))
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
test_len(vectorized_data, 400)
# In[ ]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
maxlen = 200
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300  # Length of the token vectors we will create for passing into the network
epochs = 2
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model7.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights7.h5")
print('Model saved.')
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
# In[ ]:
def avg_len(data):
    total_len = 0
    for sample in data:
        total_len += len(sample[1])
    return total_len / len(data)
print(avg_len(dataset))
# In[ ]:
def clean_data(data):
""" Shift to lower case, replace unknowns with UNK, and listify """
new_data = []
    VALID = 'abcdefghijklmnopqrstuvwxyz0123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower(): # Just grab the string, not the label
if char in VALID:
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data
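# A quick sanity check on a toy sample: any character outside VALID (here '@')
# becomes the 'UNK' token.
print(clean_data([(1, 'Hi@you!')]))
# -> [['h', 'i', 'UNK', 'y', 'o', 'u', '!']]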
listified_data = clean_data(dataset)
# In[ ]:
def char_pad_trunc(data, maxlen):
""" We truncate to maxlen or add in PAD tokens """
new_dataset = []
for sample in data:
if len(sample) > maxlen:
new_data = sample[:maxlen]
elif len(sample) < maxlen:
pads = maxlen - len(sample)
new_data = sample + ['PAD'] * pads
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset
maxlen = 1500
# In[ ]:
def create_dicts(data):
""" Modified from Keras LSTM example"""
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
# In[ ]:
import numpy as np
def onehot_encode(dataset, char_indices, maxlen):
"""
One hot encode the tokens
Args:
dataset list of lists of tokens
    char_indices  dictionary of {key=character, value=index into the encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
"""
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for i, sentence in enumerate(dataset):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
return X
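# Memory note: the default float64 array above is large for the full IMDB
# training set -- roughly 25,000 reviews x 1,500 steps x ~60 characters x
# 8 bytes, on the order of 18 GB. A smaller dtype is an easy mitigation:
# X = np.zeros((len(dataset), maxlen, len(char_indices)), dtype=np.uint8)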
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
listified_data = clean_data(dataset)
maxlen = 1500
common_length_data = char_pad_trunc(listified_data, maxlen)
char_indices, indices_char = create_dicts(common_length_data)
encoded_data = onehot_encode(common_length_data, char_indices, maxlen)
# In[ ]:
split_point = int(len(encoded_data)*.8)
x_train = encoded_data[:split_point]
y_train = expected[:split_point]
x_test = encoded_data[split_point:]
y_test = expected[split_point:]
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, LSTM
num_neurons = 40
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
# In[ ]:
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("char_lstm_model3.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("char_lstm_weights3.h5")
print('Model saved.')
# In[ ]:
from nltk.corpus import gutenberg
print(gutenberg.fileids())
# In[ ]:
text = ''
for txt in gutenberg.fileids():
if 'shakespeare' in txt:
text += gutenberg.raw(txt).lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# In[ ]:
print(text[:500])
# In[ ]:
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
# In[ ]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)  # plain bool: np.bool is deprecated in newer NumPy
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
# In[ ]:
epochs = 6
batch_size = 128
model_structure = model.to_json()
with open("shakes_lstm_model.json", "w") as json_file:
json_file.write(model_structure)
for i in range(5):
model.fit(X, y,
batch_size=batch_size,
epochs=epochs)
model.save_weights("shakes_lstm_weights_{}.h5".format(i+1))
print('Model saved.')
# In[ ]:
### NOT IN CHAPTER, Just to reproduce output
from keras.models import model_from_json
with open('shakes_lstm_model.json', 'r') as f:
model_json = f.read()
model = model_from_json(model_json)
model.load_weights('shakes_lstm_weights_4.h5')
# In[ ]:
import random
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
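# The temperature parameter rescales the distribution before sampling: values
# below 1.0 sharpen it toward the most likely character, values above 1.0
# flatten it toward uniform. A tiny demonstration of that effect:
from collections import Counter
print(Counter(sample(np.array([0.7, 0.2, 0.1]), 0.2) for _ in range(1000)))  # almost all 0s
print(Counter(sample(np.array([0.7, 0.2, 0.1]), 2.0) for _ in range(1000)))  # far more 1s and 2s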
# In[ ]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
# In[ ]:
from keras.models import Sequential
from keras.layers import GRU
model = Sequential()
model.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))
# In[ ]:
from keras.models import Sequential
from keras.layers import LSTM
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
model.add(LSTM(num_neurons_2, return_sequences=True))
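# The stacked snippet above leaves num_neurons_2 undefined; a runnable sketch
# with illustrative layer sizes (both numbers are assumptions, not from the text):
num_neurons = 50
num_neurons_2 = 30   # hypothetical size for the second recurrent layer
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
model.add(LSTM(num_neurons_2, return_sequences=True))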
|
get_serializer
|
Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
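A minimal usage sketch (``stream`` stands in for any Genshi event stream):
>>> serializer = get_serializer('html', doctype='html5')
>>> output = encode(serializer(stream), method='html', encoding='utf-8')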
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
"""Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
"""
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
# MASKED: get_serializer function (lines 62-79)
def _prepare_cache(use_cache=True):
"""Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
"""
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
class DocType(object):
"""Defines a number of commonly used DOCTYPE declarations as constants."""
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
"""Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
"""
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
            'html-transitional': cls.HTML_TRANSITIONAL,
            'html-frameset': cls.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
class XMLSerializer(object):
"""Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
"""
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
"""Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
"""
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
"""Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
"""Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
"""
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
"""Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
"""
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
"""Produces plain text from an event stream.
    Only text events are included in the output. Unlike the other serializers,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
    You can use the ``strip_markup`` option to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
"""
def __init__(self, strip_markup=False):
"""Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
"""
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield unicode(data)
class EmptyTagFilter(object):
"""Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
"""
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
r"""Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
TEXT u'\n '
START (u'two:item', Attrs())
END u'two:item'
TEXT u'\n'
END u'doc'
"""
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().next
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
"""
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
class DocTypeInserter(object):
"""A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
"""
def __init__(self, doctype):
"""Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
"""
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
|
def get_serializer(method='xml', **kwargs):
"""Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
"""
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
| 62
| 79
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
"""Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
"""
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
def get_serializer(method='xml', **kwargs):
"""Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
"""
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
def _prepare_cache(use_cache=True):
"""Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
"""
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
class DocType(object):
"""Defines a number of commonly used DOCTYPE declarations as constants."""
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
"""Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
"""
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
            'html-transitional': cls.HTML_TRANSITIONAL,
            'html-frameset': cls.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
class XMLSerializer(object):
"""Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
"""
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
"""Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
"""
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
"""Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
"""Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
"""
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
"""Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
"""
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
"""Produces plain text from an event stream.
    Only text events are included in the output. Unlike the other serializers,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
    You can use the ``strip_markup`` option to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
"""
def __init__(self, strip_markup=False):
"""Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
"""
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield unicode(data)
class EmptyTagFilter(object):
"""Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
"""
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
r"""Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
TEXT u'\n '
START (u'two:item', Attrs())
END u'two:item'
TEXT u'\n'
END u'doc'
"""
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().next
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
"""
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
class DocTypeInserter(object):
"""A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
"""
def __init__(self, doctype):
"""Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
"""
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
|
correlation_columns
|
Columns that are correlated to the target column
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
The absolute correlation cutoff; defaults to 0.5. Because the absolute value of the
correlation is used, a single k covers both the negative (-k) and positive (+k) cutoffs
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
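A minimal usage sketch on a toy frame (column names are illustrative):
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 1, 2], 'y': [2, 4, 6]})
>>> correlation_columns(df, 'y', k=0.9)
['a']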
|
# build_features.py
# This module holds utility classes and functions that creates and manipulates input features
# This module also holds the various input transformers
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# MASKED: correlation_columns function (lines 9-37)
class ColumnExtractor(BaseEstimator, TransformerMixin):
"""Columns Extractor based on correlation to the output label"""
def __init__(self, columns):
print(columns)
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
if __name__ == '__main__':
correlation_columns(pd.read_csv('././data/raw/creditcard.csv'), 'Class', k=0.3)
|
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
"""
    Columns that are correlated to the target column
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
        The absolute correlation cutoff; defaults to 0.5. Because the absolute value of the
        correlation is used, a single k covers both the negative (-k) and positive (+k) cutoffs
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
"""
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for col, value in zip(corr_sorted.index, corr_sorted.values) if value >= k and col != target_column]
return columns
| 9
| 37
|
# build_features.py
# This module holds utility classes and functions that create and manipulate input features
# This module also holds the various input transformers
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
"""
Columns that are correlated to the target column
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
The correlation cutoff point; defaults to -0.5 and 0.5.
The value passed in represents both the negative and positive cutoffs.
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
"""
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for col, value in zip(corr_sorted.index, corr_sorted.values) if value >= k and col != target_column]
return columns
class ColumnExtractor(BaseEstimator, TransformerMixin):
"""Columns Extractor based on correlation to the output label"""
def __init__(self, columns):
print(columns)
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
if __name__ == '__main__':
correlation_columns(pd.read_csv('././data/raw/creditcard.csv'), 'Class', k=0.3)
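# Hedged usage sketch on a small synthetic frame (editor's addition; the
# creditcard.csv path above is project-specific, so illustrative data is used):
#
#     df = pd.DataFrame({'x': [1, 2, 3, 4], 'z': [4, 3, 2, 1],
#                        'Class': [0, 0, 1, 1]})
#     correlation_columns(df, 'Class', k=0.5)  # -> ['x', 'z'] (|corr| ~ 0.89 each)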
|
get_dtype_counts
|
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid using this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
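# Hedged sketch (editor's addition): the cumulative methods above reduce to an
# ordered, unpartitioned Spark Window. A standalone cummin over column "A"
# would look roughly like this; "__natural_order__" is a stand-in for the
# row-order column the internal frame maintains:
#
#     from pyspark.sql import Window, functions as F
#     w = (Window.orderBy("__natural_order__")
#          .rowsBetween(Window.unboundedPreceding, Window.currentRow))
#     sdf = sdf.withColumn("A_cummin", F.min("A").over(w))
#
# Without partitionBy, all rows flow through one partition, which is exactly
# the performance caveat noted in the docstrings.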
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid using this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid using this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later;
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid using this method on very large datasets.
.. note:: unlike pandas', pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
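# Hedged sketch (editor's addition): the ``exp(sum(log(...)))`` emulation
# noted above, cross-checked with NumPy on positive inputs:
#
#     import numpy as np
#     x = np.array([2.0, 3.0, 4.0])
#     np.allclose(np.exp(np.cumsum(np.log(x))), np.cumprod(x))  # True
#
# log() is undefined at 0 and for negatives, which is why only positive
# numbers are supported here.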
# TODO: Although this has been removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
# MASKED: get_dtype_counts function (lines 395-439)
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They take higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have a 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
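# Hedged sketch (editor's addition): when ``header`` is a list, the branch
# above renames columns positionally before writing, essentially:
#
#     renamed = sdf.select([scol_for(sdf, old).alias(new)
#                           for old, new in zip(old_names, header_aliases)])
#
# old_names and header_aliases are hypothetical stand-ins for the resolved
# column labels and the supplied header list.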
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Throws a ValueError for any other 'orient', since the others are not
list-like. It must always be True for now.
orient : str, default 'records'
It must always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They take higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
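# Hedged sketch (editor's addition) of the locals()-capture pattern used in
# to_excel: locals() is taken before any new locals exist, so it holds exactly
# the caller's arguments, which are then replayed against the pandas function:
#
#     def wrapper(self, na_rep="", header=True):  # hypothetical signature
#         args = locals()                          # must be the first statement
#         kwargs = {k: v for k, v in args.items() if k != "self"}
#         return target_pandas_func(self._to_internal_pandas(), **kwargs)
#
# target_pandas_func stands in for the matching pandas method;
# validate_arguments_and_invoke_function additionally drops parameters the
# pandas counterpart does not accept.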
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
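# Hedged sketch (editor's addition): what the mean() closure above computes
# for a boolean column, expressed in plain PySpark:
#
#     from pyspark.sql import functions as F
#     from pyspark.sql.types import LongType
#     sdf.select(F.mean(F.col("flag").cast(LongType())).alias("mean_flag"))
#
# Booleans are first cast to 0/1 longs so that Spark's numeric mean applies;
# non-numeric, non-boolean columns raise TypeError instead.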
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates the product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, returns an empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
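# Hedged sketch (editor's addition): the zero/sign bookkeeping in prod(),
# mirrored in NumPy:
#
#     import numpy as np
#     x = np.array([-2.0, 3.0, -4.0])
#     sign = 1 if (x < 0).sum() % 2 == 0 else -1
#     result = 0.0 if (x == 0).any() else sign * np.exp(np.log(np.abs(x)).sum())
#     np.isclose(result, np.prod(x))  # True; both give 24.0
#
# Zeros short-circuit the product to 0 and an odd count of negatives flips
# the sign, matching the F.when(...) logic above.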
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation, because computing the exact median across a large
dataset is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error is 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
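# Hedged sketch (editor's addition): the approximate median in plain PySpark;
# with accuracy=10000 the relative rank error is bounded by 1/10000:
#
#     from pyspark.sql import functions as F
#     from pyspark.sql.types import DoubleType
#     sdf.select(F.percentile_approx(F.col("a").cast(DoubleType()), 0.5, 10000))
#
# F.percentile_approx requires a recent Spark (assumed >= 3.1); larger
# accuracy values trade memory for a tighter error bound.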
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
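# Hedged sketch (editor's addition): sem() above is std / sqrt(count); a quick
# NumPy cross-check against the docstring values:
#
#     import numpy as np
#     a = np.array([1.0, 2.0, 3.0])
#     np.std(a, ddof=1) / np.sqrt(a.size)  # 0.5773..., matches psser.sem()
#     np.std(a, ddof=0) / np.sqrt(a.size)  # 0.4714..., matches sem(ddof=0)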
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose whether to include NA in group keys by setting the dropna
parameter; the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean or multiple values, it raises an exception in all
cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_frame()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
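# head(2) fetches at most two rows; that is enough for pandas' bool() to
# detect (and reject) objects with more than a single element.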
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
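# A row counts as valid only when every data column is non-null, so AND
# together the per-column isNotNull predicates.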
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
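# DataFrame.tail grabs the last matching row without a full sort; it is
# only available in PySpark >= 3.0, hence the note above.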
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas;
NA is also counted as a period, again unlike pandas. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
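Examples
--------
A minimal sketch of typical usage (marked ``# doctest: +SKIP``; the output
shown assumes pandas-like semantics with min_periods defaulting to the
window size):
>>> ps.Series([1, 2, 3, 4]).rolling(2).sum()  # doctest: +SKIP
0    NaN
1    3.0
2    5.0
3    7.0
dtype: float64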
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer to https://github.com/databricks/koalas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas;
NA is also counted as a period, again unlike pandas. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
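Examples
--------
A minimal sketch of typical usage (marked ``# doctest: +SKIP``; the output
shown assumes pandas-like semantics):
>>> ps.Series([1, 2, 3]).expanding().sum()  # doctest: +SKIP
0    1.0
1    3.0
2    6.0
dtype: float64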
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
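# Like pandas, a Frame has no unambiguous truth value, so always raise.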
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special-case floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
.. note:: unlike pandas, pandas-on-Spark emulates cumulative product by the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only supports one-level index columns for now")
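# When `header` is a list, treat its entries as aliases: select the index and
# data columns, rename the data columns, then write with header=True.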
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw a ValueError if 'orient' is incorrect, since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
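# Booleans are averaged as 0/1 (cast to long), matching pandas; any other
# non-numeric type raises a TypeError.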
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
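# The F.coalesce(F.sum(...), SF.lit(0)) above mirrors pandas: with the default
# min_count=0, the sum of an all-NA column is 0 rather than null. A sketch,
# assuming an existing SparkSession named `spark`:
#
#     from pyspark.sql import functions as F
#     sdf = spark.createDataFrame([(None,), (None,)], "a double")
#     sdf.select(F.sum("a")).first()[0]                          # None
#     sdf.select(F.coalesce(F.sum("a"), F.lit(0.0))).first()[0]  # 0.0, as pandas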
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates product using the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
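# The exp(sum(log)) trick used above, sketched in plain Python (illustration
# only; it works because zeros and the sign are handled separately, exactly as
# in `prod` above):
#
#     import math
#     xs = [2.0, -3.0, 4.0]
#     sign = 1 if sum(x < 0 for x in xs) % 2 == 0 else -1
#     result = 0 if any(x == 0 for x in xs) else \
#         sign * math.exp(sum(math.log(abs(x)) for x in xs))
#     round(result)   # -24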
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
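# The ddof switch above, in plain Python, using column 'a' from the example
# (the NaN is skipped, as it is by the Spark aggregation):
#
#     xs = [1.0, 2.0, 3.0]
#     m = sum(xs) / len(xs)
#     sum((x - m) ** 2 for x in xs) / (len(xs) - 1)  # var_samp, ddof=1 -> 1.0
#     sum((x - m) ** 2 for x in xs) / len(xs)        # var_pop,  ddof=0 -> 0.666...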
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximate median based upon
approximate percentile computation because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
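# percentile_approx above is Spark's approximate percentile aggregate; with
# the default accuracy=10000, the relative rank error is bounded by
# 1.0/accuracy. A sketch, assuming an existing SparkSession named `spark`:
#
#     from pyspark.sql import functions as F
#     rows = [(24.0,), (21.0,), (25.0,), (33.0,), (26.0,)]
#     sdf = spark.createDataFrame(rows, "a double")
#     sdf.select(F.percentile_approx("a", 0.5, 10000)).first()[0]   # 25.0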
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
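# sem = std / sqrt(count), sketched in plain Python for the column 'a' above:
#
#     import math
#     xs = [1, 2, 3]
#     m = sum(xs) / len(xs)
#     std = math.sqrt(sum((x - m) ** 2 for x in xs) / (len(xs) - 1))  # 1.0
#     std / math.sqrt(len(xs))   # 0.57735..., matching psser.sem() above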
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
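# _apply_series_op above maps the per-column function over every Series of the
# frame, so abs becomes one F.abs expression per numeric column. A sketch,
# assuming an existing SparkSession named `spark`:
#
#     from pyspark.sql import functions as F
#     sdf = spark.createDataFrame([(4, 100), (5, -50)], "a int, c int")
#     sdf.select(F.abs("a"), F.abs("c")).show()   # abs applied column-wise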
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
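# Key normalization above reduces every accepted `by` form to a flat list of
# Series and/or name tuples before handing it to _build_groupby, e.g.
# (illustrative values only):
#
#     "b"           -> [("b",)]
#     ("x", "a")    -> [("x", "a")]
#     ["a", psser]  -> [("a",), psser]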
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If non-boolean or multiple values exist, it raises an exception in all
cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
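# The query above amounts to: keep only rows where every data column is
# non-null, project the index columns, and take the first row; roughly
# (hypothetical column names, for illustration):
#
#     sdf.filter(a.isNotNull() & b.isNotNull() & c.isNotNull()) \
#        .select(index_cols).limit(1).toPandas()
#
# Arrow is disabled so the limit(1) row preserves the original row order.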
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This behavior might change
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/databricks/koalas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This behavior might change
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` is only supported with pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
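# Why nanvl above: Spark's count treats NaN (unlike null) as a valid value,
# whereas pandas' count skips both. nanvl maps NaN to null first. A sketch,
# assuming an existing SparkSession named `spark`:
#
#     from pyspark.sql import functions as F
#     sdf = spark.createDataFrame([(float("nan"),), (1.0,), (None,)], "a double")
#     sdf.select(F.count("a")).first()[0]                        # 2 (NaN counted)
#     sdf.select(F.count(F.nanvl("a", F.lit(None)))).first()[0]  # 1 (pandas-like)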
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column that behaves similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates cumulative product using the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any of the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although pandas >= 1.0.0 has removed this API, we're keeping it as deprecated
# since we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
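# (callable, data_keyword) form: inject self under the given keyword
# instead of passing it as the first positional argument.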
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only supports one-level index columns for now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses ``orient='records'``
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw a ValueError for an incorrect 'orient' since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These are specific to PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("only orient='records' is supported for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
# MASKED: mean function (lines 1132-1193)
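# NOTE: the `mean` implementation is masked above. Below is a minimal sketch of
# what it plausibly looks like, following the `_reduce_for_stat_function` pattern
# used by `sum`, `std`, and `var` in this class; the actual masked body (notably
# its full docstring and doctests) may differ.
#
# def mean(
#     self, axis: Optional[Axis] = None, numeric_only: bool = None
# ) -> Union[Scalar, "Series"]:
#     """Return the mean of the values."""
#     axis = validate_axis(axis)
#     if numeric_only is None and axis == 0:
#         numeric_only = True
#
#     def mean(spark_column: Column, spark_type: DataType) -> Column:
#         if isinstance(spark_type, BooleanType):
#             spark_column = spark_column.cast(LongType())
#         elif not isinstance(spark_type, NumericType):
#             raise TypeError(
#                 "Could not convert {} ({}) to numeric".format(
#                     spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
#                 )
#             )
#         return F.mean(spark_column)
#
#     return self._reduce_for_stat_function(
#         mean, name="mean", axis=axis, numeric_only=numeric_only
#     )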
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
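# F.sum over an all-NULL column yields NULL; coalescing with 0 matches pandas,
# where the sum of an empty or all-NA column is 0 (`min_count` is handled
# separately in `_reduce_for_stat_function`).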
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark's product is emulated with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
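# A pure-Python illustration (for exposition only; the helper below is
# hypothetical and not part of this module) of the exp(sum(log(abs(...))))
# trick used in `prod` above: zeros short-circuit the result to 0, and an odd
# count of negative values flips the sign.
#
#   import math
#
#   def prod_via_logs(xs):
#       if any(x == 0 for x in xs):
#           return 0
#       sign = -1 if sum(x < 0 for x in xs) % 2 else 1
#       return sign * math.exp(sum(math.log(abs(x)) for x in xs))
#
#   prod_via_logs([2, -3, 4])  # ~ -24.0, up to float round-off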
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
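# percentile_approx gives an approximate median with relative error bounded by
# 1.0 / accuracy (see the docstring above); casting to double lets BooleanType
# and integer columns share one code path.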
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
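# Standard error of the mean: std (sample or population, depending on ddof)
# divided by the square root of the non-NA count.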
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting the dropna parameter;
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. It raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If non-boolean values or multiple values exist, it raises an exception in all
cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
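# head(2) caps the collected rows at two, which is enough for pandas' bool()
# to either succeed (one row) or raise its standard ambiguity/type errors.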
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
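# Spark's DataFrame.tail is only available in PySpark >= 3.0, hence the
# note in the docstring above.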
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# For the 'axis' implementation, refer to https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
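# head(2) avoids counting the whole frame: a single returned row means the
# length-1 axis can be squeezed down to a scalar.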
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
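# For a monotonically decreasing index, the .loc slice bounds are swapped below.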
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0 since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
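# A minimal sketch of why nanvl is needed above, assuming a DoubleType
# column holding [1.0, float("nan"), None]:
#   F.count(spark_column)                         -> 2 (Spark counts nan, skips null)
#   F.count(F.nanvl(spark_column, SF.lit(None)))  -> 1 (nan mapped to null, as in pandas)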
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with an
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
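# A hedged sketch of the exp(sum(log(...))) emulation behind _cumprod,
# assuming positive values [2.0, 3.0, 4.0]:
#   log            -> [0.693..., 1.099..., 1.386...]
#   cumulative sum -> [0.693..., 1.792..., 3.178...]
#   exp            -> [2.0, 6.0, 24.0], i.e. the cumulative product.
# Zero or negative inputs break the log step, hence the positive-only note.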
# TODO: Although this was removed in pandas >= 1.0.0, we keep it as deprecated
# since we use it internally for `DataFrame.info`.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``multiply_2`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 seems not to have the 'columns' parameter in Series.to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Throws ValueError with an incorrect 'orient' since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should be always 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert to pandas and
# use the 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
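# A hedged note on the BooleanType cast above: booleans are averaged as 0/1,
# matching pandas. For example, ps.Series([True, False, True]).mean() would
# compute F.mean over [1, 0, 1] and return roughly 0.6667.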
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
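# A hedged note on the coalesce above: F.sum yields null for an all-null
# column, whereas pandas' default sum of an all-NA column is 0; coalescing
# with SF.lit(0) reproduces that, and min_count is forwarded to
# _reduce_for_stat_function via its keyword arguments.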
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with an ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, returns an empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
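# A hedged walk-through of the sign/zero handling in prod above, assuming
# values [-2, 3, 4]:
#   num_zeros = 0; count of negatives = 1 (odd) -> sign = -1
#   exp(sum(log(|x|))) = exp(log(2) + log(3) + log(4)) = 24.0
#   result = sign * 24.0 = -24, rounded and cast back for integral types.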
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
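# A hedged note on ddof above: ddof=1 selects the sample estimator
# stddev_samp (divisor N - 1), ddof=0 the population estimator stddev_pop
# (divisor N). For [1, 2, 3], stddev_samp = 1.0 and stddev_pop ~= 0.8165,
# matching the docstring examples.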
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
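# Spark's percentile_approx computes an approximate median (the 0.5 percentile)
# with relative error bounded by 1.0 / accuracy, avoiding a full sort of the data.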
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
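# Standard error of the mean = stddev / sqrt(N), where N is the number of
# non-NA values as computed by Frame._count_expr.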
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: the `by` argument only supports the grouping name, and only `as_index`, for now.
# Documentation should be updated when more are supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting the dropna parameter;
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If the element is non-boolean, or if multiple values exist, an exception is raised
in all the cases below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_frame()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
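# Keep only the rows where every data column is non-null, then take the
# index values of the first such row.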
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
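# tail(1) (available since Spark 3.0) fetches only the last matching row
# instead of collecting the whole filtered frame.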
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameters should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Unlike pandas, NA values are also counted toward the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameters should be implemented.
# 'axis' implementation, refer https://github.com/databricks/koalas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
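# Checking at most the first two column labels is enough to decide whether
# there is exactly one column.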
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0 since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None.
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None.
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Handle floating point types specially because Spark's count treats NaN as a valid value,
# whereas pandas' count doesn't include NaN.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
sum
|
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later;
# other windows except unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to moving all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
.. note:: unlike pandas', pandas-on-Spark's cumprod emulates the cumulative product
using the ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although this has been removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
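# When header is a list, its strings are used as aliases for the column
# names in the written CSV file.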
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note that NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Throws a ValueError for an incorrect 'orient', since others are not
list-like. It must always be True for now.
orient : str, default 'records'
It must always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They have a higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
        Once a workbook has been saved it is not possible to write further data
        without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
# MASKED: sum function (lines 1195-1278)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
        .. note:: unlike pandas', pandas-on-Spark emulates product with the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
        Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
        If there are no numeric type columns, returns an empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
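    # A minimal sketch (not part of the original API) of the ``exp(sum(log(...)))``
    # product trick used above, written against plain PySpark; the column name "x"
    # and the tiny DataFrame are illustrative assumptions:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(1.0,), (2.0,), (-3.0,)], ["x"])
    #   num_zeros = F.sum(F.when(F.col("x") == 0, 1).otherwise(0))
    #   sign = F.when(
    #       F.sum(F.when(F.col("x") < 0, 1).otherwise(0)) % 2 == 0, 1
    #   ).otherwise(-1)
    #   prod = F.when(num_zeros > 0, 0).otherwise(
    #       sign * F.exp(F.sum(F.log(F.abs(F.col("x")))))
    #   )
    #   sdf.select(prod.alias("prod")).show()  # -6.0, up to floating-point error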
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
        count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
        .. note:: Unlike pandas', the median in pandas-on-Spark is an approximate median
            based upon approximate percentile computation, because computing the median
            across a large dataset is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
        accuracy : int, optional
            Default accuracy of approximation. Larger value means better accuracy.
            The relative error is 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
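    # A minimal sketch (illustrative names, not the actual internals) of the
    # approximation behind `median`: `F.percentile_approx` trades accuracy for
    # speed, with relative error roughly 1.0 / accuracy:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(float(i),) for i in range(101)], ["x"])
    #   sdf.select(F.percentile_approx("x", 0.5, 10000).alias("median")).show()  # ~50.0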
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
        scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
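    # A minimal sketch of the same computation in plain PySpark (the DataFrame and
    # column name are illustrative assumptions): sem = stddev / sqrt(count), using
    # the sample or population standard deviation depending on `ddof`:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(1,), (2,), (3,)], ["a"])
    #   sdf.select(
    #       (F.stddev_samp("a") / F.sqrt(F.count("a"))).alias("sem")
    #   ).show()  # ~0.5774, matching psdf.a.sem() in the example above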
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
    # TODO: the 'by' argument currently only supports grouping names, and only 'as_index'
    # is supported among the options. Documentation should be updated when more is supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
        Return the bool of a single element in the current object.
        This must be a boolean scalar value, either True or False. Raises a ValueError if
        the object does not have exactly 1 element, or if that element is not boolean.
        Returns
        -------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If non-boolean or multiple values exist, it raises an exception in all
        cases, as shown below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
        .. note:: unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window
            size, and NA is also counted as a period. This might be changed in the near
            future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
        .. note:: unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window
            size, and NA is also counted as a period. This might be changed in the near
            future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
        # `to_markdown` requires pandas >= 1.0.0 since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
        .. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying a partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
        .. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying a partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
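    # A minimal sketch (illustrative column names, not the actual internals) of the
    # unbounded-Window pattern behind `ffill`/`bfill`:
    #
    #   from pyspark.sql import SparkSession, Window, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(0, 2.0), (1, None), (2, 3.0)], ["idx", "v"])
    #   w = Window.orderBy("idx").rowsBetween(Window.unboundedPreceding, Window.currentRow)
    #   sdf.withColumn("v_ffill", F.last("v", ignorenulls=True).over(w)).show()
    #
    # Because the window has no partitionBy, all rows land in a single partition,
    # which is the performance caveat noted in the docstrings above.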
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
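    # A minimal sketch (illustrative data) of the NaN-vs-null distinction handled
    # above: Spark's `count` includes NaN for float columns, while pandas' does not,
    # so NaN is first mapped to null via `F.nanvl` before counting:
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(1.0,), (float("nan"),), (None,)], "x double")
    #   sdf.select(
    #       F.count("x").alias("spark_count"),                        # 2: counts NaN
    #       F.count(F.nanvl("x", F.lit(None))).alias("pandas_like"),  # 1: NaN excluded
    #   ).show()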
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
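    # A minimal sketch (illustrative data) of the coalesce-to-zero behaviour above:
    # `F.sum` returns null over an all-null column, and `F.coalesce(..., lit(0))`
    # turns that into 0, matching pandas' default `min_count=0` semantics; the
    # `min_count` argument itself is forwarded to `_reduce_for_stat_function`.
    #
    #   from pyspark.sql import SparkSession, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(None,), (None,)], "b double")
    #   sdf.select(F.coalesce(F.sum("b"), F.lit(0)).alias("sum")).show()  # 0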
| 1,195
| 1,278
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
        .. note:: the current implementation of cummin uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
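    # A minimal sketch (illustrative names, not the actual `_cum` internals) of the
    # cumulative pattern: the aggregate is applied over an unbounded preceding window,
    # e.g. for cummin:
    #
    #   from pyspark.sql import SparkSession, Window, functions as F
    #
    #   spark = SparkSession.builder.getOrCreate()
    #   sdf = spark.createDataFrame([(0, 2.0), (1, 3.0), (2, 1.0)], ["idx", "A"])
    #   w = Window.orderBy("idx").rowsBetween(Window.unboundedPreceding, Window.currentRow)
    #   sdf.withColumn("A_cummin", F.min("A").over(w)).show()  # 2.0, 2.0, 1.0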
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
        .. note:: the current implementation of cummax uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
.. note:: unlike pandas', pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
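# A minimal sketch, added here for illustration (not part of the original
# source), of the ``exp(sum(log(...)))`` trick that ``cumprod`` relies on,
# written with plain PySpark columns; it assumes a ``SparkSession`` named
# ``spark`` is available:
#
#   from pyspark.sql import Window
#   from pyspark.sql import functions as F
#
#   sdf = spark.createDataFrame([(1, 2.0), (2, 3.0), (3, 4.0)], ["i", "A"])
#   w = Window.orderBy("i").rowsBetween(Window.unboundedPreceding, Window.currentRow)
#   # exp(sum(log(x))) equals the running product for strictly positive x
#   sdf.select(F.exp(F.sum(F.log("A")).over(w)).alias("cumprod")).show()
#   # cumprod: 2.0, 6.0, 24.0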
# TODO: Although this has been removed since pandas >= 1.0.0, we're keeping it as
# deprecated because we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
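# A short illustrative sketch (not from the original source) of the
# replacement recommended by the deprecation warning above; ``.dtypes``
# returns a plain pandas Series, so ``value_counts()`` runs on the driver:
#
#   import pyspark.pandas as ps
#
#   psdf = ps.DataFrame({"str": ["a", "b"], "int1": [1, 2], "int2": [1, 2]})
#   psdf.dtypes.value_counts()
#   # int64     2
#   # object    1
#   # dtype: int64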
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well:
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 seems not to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
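# A hedged usage sketch (added, not from the original source) of the
# ``partition_cols`` parameter above: Spark writes one sub-directory per
# partition value (e.g. ``country=KR/part-...``), and partition discovery is
# expected to restore the column on read. The output path is only illustrative:
#
#   import pyspark.pandas as ps
#
#   psdf = ps.DataFrame({"country": ["KR", "US"], "code": [1, 2]})
#   psdf.to_csv("/tmp/to_csv_example", num_files=1, partition_cols="country")
#   ps.read_csv("/tmp/to_csv_example")  # 'country' is recovered from the paths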
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw ValueError if 'orient' is incorrect, since other formats are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
They have a higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
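# A brief note (added for illustration, not from the original source) on the
# ``options`` handling above: a single ``options=`` dict is unwrapped before
# being passed to the Spark writer, so the two call styles below should be
# equivalent; the ``ignoreNullFields`` option and the path are illustrative:
#
#   psdf.to_json("/tmp/out", ignoreNullFields=True)
#   psdf.to_json("/tmp/out", options={"ignoreNullFields": True})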
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates product with the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
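# A small worked note, added for illustration (not from the original source),
# on why ``prod`` above tracks zeros and signs separately: the
# ``exp(sum(log(abs(x))))`` trick only recovers the absolute value of the
# product, so the sign is rebuilt from the parity of negative values, and any
# zero short-circuits the result to 0. For example, for [-2, 3, -4]:
#
#   exp(log(2) + log(3) + log(4)) == 24.0   # |(-2) * 3 * (-4)|
#   two negatives (even parity) -> sign +1 -> product == 24
#   [0, -2, 3] has num_zeros > 0 -> product == 0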
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
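# A minimal sketch (added, not in the original source) of how ``ddof`` maps
# onto the two Spark SQL aggregates used above; assumes a ``SparkSession``
# named ``spark``:
#
#   from pyspark.sql import functions as F
#
#   sdf = spark.createDataFrame([(1.0,), (2.0,), (3.0,)], ["a"])
#   sdf.select(
#       F.stddev_samp("a").alias("ddof_1"),  # divisor N - 1 -> 1.0
#       F.stddev_pop("a").alias("ddof_0"),   # divisor N     -> ~0.8165
#   ).show()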
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
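# A hedged sketch (added, not from the original source) of the ``accuracy``
# trade-off behind ``median``, using the same ``percentile_approx`` expression
# directly; a larger accuracy costs more to compute but tightens the roughly
# 1.0/accuracy relative error. Assumes a ``SparkSession`` named ``spark``:
#
#   from pyspark.sql import functions as F
#
#   sdf = spark.range(1, 1001).withColumn("v", F.col("id").cast("double"))
#   sdf.select(
#       F.percentile_approx("v", 0.5, 100).alias("rough"),
#       F.percentile_approx("v", 0.5, 10000).alias("fine"),
#   ).show()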
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
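# A quick cross-check, added for illustration (not from the original source),
# that ``sem`` is just the standard deviation scaled by 1/sqrt(count),
# mirroring the helper composition above:
#
#   import pyspark.pandas as ps
#
#   psser = ps.Series([1, 2, 3])
#   psser.sem()                         # 0.5773502691896258
#   psser.std() / psser.count() ** 0.5  # the same value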
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns a groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean or multiple values, it raises an exception in all
cases, as shown below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Unlike pandas, NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
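        # For a monotonically decreasing index, the slice bounds below are swapped
        # (loc[after:before]) so label-based slicing still selects the requested range.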
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
        # `to_markdown` requires pandas >= 1.0.0 since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
        .. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
        .. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
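        # For example, over [1.0, float("nan")] Spark's F.count reports 2, while
        # pandas' count reports 1. F.nanvl(col, lit(None)) maps NaN to null so
        # that F.count skips it, matching pandas.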
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
product
|
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with the
    ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
        .. note:: the current implementation of cummin uses Spark's Window without
            specifying partition specification. This moves all data into a single
            partition on a single machine and could cause serious performance
            degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
        .. note:: the current implementation of cummax uses Spark's Window without
            specifying partition specification. This moves all data into a single
            partition on a single machine and could cause serious performance
            degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
        .. note:: the current implementation of cumsum uses Spark's Window without
            specifying partition specification. This moves all data into a single
            partition on a single machine and could cause serious performance
            degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
    # TODO: use pandas_udf to support negative values and other options later.
    # Windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
        .. note:: the current implementation of cumprod uses Spark's Window without
            specifying partition specification. This moves all data into a single
            partition on a single machine and could cause serious performance
            degradation. Avoid this method with very large datasets.
        .. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
        By default, iterates over rows and computes the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
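    # A minimal sketch of the exp(sum(log(...))) emulation noted above, assuming a
    # Spark Column of positive values (illustrative only, not the actual internals):
    # for positive x1..xn, x1 * ... * xn == exp(log(x1) + ... + log(xn)), so a
    # cumulative product can be expressed as
    #   F.exp(F.sum(F.log(scol)).over(window))
    # log() is undefined for values <= 0, hence the positive-numbers restriction.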
    # TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
    # since we're using it for `DataFrame.info` internally.
    # We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
        warnings.warn(
            "`get_dtype_counts` has been deprecated and will be "
            "removed in a future version. For DataFrames use "
            "`.dtypes.value_counts()`.",
            FutureWarning,
        )
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use a lambda as well.
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
        A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an
        ndarray of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
        .. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are passed as PySpark's CSV options. Check
            the options in PySpark's API documentation for spark.write.csv(...).
            They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
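            # Unwrap a single options={...} dict so callers can pass PySpark writer
            # options either directly as kwargs or as one dict.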
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
            # pandas 0.23 does not seem to have a 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
            raise ValueError("to_csv only supports one-level index columns now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
        .. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
        .. note:: the output JSON format is different from pandas'. It always uses
            `orient='records'` for its output. This behaviour might have to change in the
            near future. Note that NaN's and None will be converted to null and datetime
            objects will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
            If 'orient' is 'records', write out line-delimited JSON format.
            Will throw ValueError if 'orient' is incorrect, since others are not
            list-like. It should always be True for now.
        orient : str, default 'records'
            It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are passed as PySpark's JSON options. Check
            the options in PySpark's API documentation for `spark.write.json(...)`.
            They have a higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
        -------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
            raise NotImplementedError("only orient='records' is supported for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
        Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
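                # Spark's avg/mean rejects boolean columns, while pandas treats
                # booleans as 0/1; casting to long reproduces pandas' behavior.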
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
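            # F.sum returns null for an empty or all-null column; coalescing with 0
            # matches pandas, where the sum of an empty Series is 0 (min_count is
            # applied downstream and masks results with too few valid values).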
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
# MASKED: product function (lines 1280-1375)
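    # NOTE: the original body is masked out above. The definition below is a
    # reconstruction sketched from the docstring in this row's `docstring` field
    # (the exp(sum(log(...))) trick, `numeric_only`, `min_count`), mirroring the
    # neighboring `sum` implementation; it is not the verified original code.
    def product(
        self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
    ) -> Union[Scalar, "Series"]:
        axis = validate_axis(axis)
        if numeric_only is None and axis == 0:
            numeric_only = True
        elif numeric_only is True and axis == 1:
            numeric_only = None

        def prod(spark_column: Column, spark_type: DataType) -> Column:
            if isinstance(spark_type, BooleanType):
                spark_column = spark_column.cast(LongType())
            elif not isinstance(spark_type, NumericType):
                raise TypeError(
                    "Could not convert {} ({}) to numeric".format(
                        spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
                    )
                )
            # Emulate the product with exp(sum(log(...))); per the docstring this
            # only works for positive numbers.
            scol = F.exp(F.sum(F.log(spark_column)))
            if isinstance(spark_type, IntegralType):
                # Round back to an integral result, matching the int64 doctest output.
                scol = F.round(scol).cast(LongType())
            # The product of an empty or all-NA column defaults to 1 (see docstring);
            # min_count is applied downstream by _reduce_for_stat_function.
            return F.coalesce(scol, SF.lit(1))

        return self._reduce_for_stat_function(
            prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
        )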
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are
            generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
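# A minimal sketch of the ddof switch above, assuming plain Python math: for a
# column with N non-null values, stddev_samp = stddev_pop * sqrt(N / (N - 1)).
# Checking against the doctest values for column 'a' ([1, 2, 3], N=3):
#
#     import math
#     pop = math.sqrt(2.0 / 3.0)           # ddof=0 -> 0.816496580927726
#     samp = pop * math.sqrt(3.0 / 2.0)    # ddof=1 -> 1.0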
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: Optional[bool] = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation, because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error is approximately 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
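# A minimal sketch of what the reduction above compiles down to in plain Spark
# terms (the names `psdf` and "a" are illustrative): the approximate median is
# the 0.5 quantile from percentile_approx, and the relative error shrinks
# roughly as 1.0 / accuracy.
#
#     sdf = psdf._internal.spark_frame
#     sdf.select(F.percentile_approx(F.col("a").cast("double"), 0.5, 10000))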
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
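# The composition above implements the textbook formula sem = std / sqrt(N),
# with N the non-null count. A sanity check against the doctest values above,
# in plain Python:
#
#     import math
#     1.0 / math.sqrt(3)                    # ddof=1 -> 0.5773502691896258
#     math.sqrt(2.0 / 3.0) / math.sqrt(3)   # ddof=0 -> 0.47140452079103173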
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose whether to include NA in the group keys by setting the dropna
parameter; the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
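# Illustrations of the key normalization above (data and names hypothetical):
# a plain label becomes a 1-tuple, a tuple label stays a single key, and Series
# objects pass through unchanged, so mixed forms are accepted too.
#
#     psdf.groupby("a")             # new_by == [("a",)]
#     psdf.groupby(("x", "a"))      # new_by == [("x", "a")]
#     psdf.groupby([psdf.a, "b"])   # new_by == [psdf.a, ("b",)]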
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly one element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If the value is non-boolean, or if multiple values exist, it raises an exception in all
cases, as shown below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_frame()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
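# Note on the sql_conf toggle above: with Arrow enabled, toPandas() may not
# preserve row order when reassembling partitions, so Arrow is disabled to keep
# the first matching row first. The query itself is just a filtered, index-only
# LIMIT 1, roughly (the index column name is illustrative):
#
#     sdf.filter(cond).select("__index_level_0__").limit(1).toPandas()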
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
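# A sketch of the row frame this maps onto (not the exact internal code;
# natural_order_col stands for the hidden ordering column pandas-on-Spark
# maintains): a rolling window of size `window` covers the current row plus
# the `window - 1` rows before it, something like:
#
#     from pyspark.sql import Window
#     w = Window.orderBy(natural_order_col).rowsBetween(-(window - 1), Window.currentRow)
#     F.sum(col).over(w)   # a rolling sum over that fixed-size frame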
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with an index of sorted integers.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with an index of sorted strings.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
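# Because `.loc` label slicing follows the index direction, the branches above
# swap `before` and `after` for a monotonically decreasing index. A small
# illustration (hypothetical data):
#
#     ps.Series([10, 20, 30], index=[3, 2, 1]).truncate(1, 2)
#     # internally slices .loc[2:1], keeping the rows labelled 2 and 1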
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0 since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
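# Worked illustration of the nanvl trick above: Spark's count() treats NaN as a
# valid value, so NaN is first mapped to NULL, which count() does skip. That is
# what makes float/double counts match pandas:
#
#     F.count(F.nanvl(spark_column, SF.lit(None)))   # NaN -> NULL -> not counted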
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
def product(
self, axis: Optional[Axis] = None, numeric_only: Optional[bool] = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
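# A pure-Python sketch of the exp(sum(log(abs(...)))) identity that `prod`
# relies on, including the zero short-circuit and the sign bookkeeping (purely
# illustrative; the real computation runs as Spark expressions):
#
#     import math
#     def prod_sketch(xs):
#         if any(x == 0 for x in xs):
#             return 0
#         sign = -1 if sum(x < 0 for x in xs) % 2 else 1
#         return sign * math.exp(sum(math.log(abs(x)) for x in xs))
#
#     prod_sketch([1, 2, 3, 4, 5])   # ~120.0; integral results are rounded above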
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
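# A sketch of the window behind the cumulative ops in this class (the internals
# order by a hidden natural-order column; the name below is illustrative):
#
#     from pyspark.sql import Window
#     w = Window.orderBy("__natural_order__").rowsBetween(
#         Window.unboundedPreceding, Window.currentRow
#     )
#     F.min(col).over(w)   # running minimum down the frame
#
# With no partitionBy, every row lands in a single partition, which is exactly
# the performance caveat in the note above.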
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although this was removed in pandas >= 1.0.0, we keep it as deprecated
# since we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an
ndarray of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : int, optional
The number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
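# A list-valued header supplies aliases: select each data column under its
# alias so that Spark writes the alias names in the header row.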
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses
`orient='records'` for its output. This behaviour might change in the near future.
Note that NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Throws ValueError for any other 'orient' since the others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
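# Booleans are cast to long so Spark's numeric aggregates accept them; any
# other non-numeric type raises a TypeError, much like pandas does.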
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates product by the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, returns an empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
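# Spark has no product aggregate, so emulate it as
#     sign * exp(sum(log(abs(x))))
# with special cases: any zero forces the result to 0, an odd count of
# negative values flips the sign, and integral inputs are rounded back to
# long. Booleans reduce to min(...), whose product is 1 only when every
# (non-null) value is True.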
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher's definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index', counts are generated for each column. If 1 or 'columns', counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
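# Spark only exposes population (ddof=0) and sample (ddof=1) standard
# deviations, hence the restriction above.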
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
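# percentile_approx at probability 0.5 gives the approximate median; the
# relative error is bounded by 1.0 / accuracy.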
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
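# Standard error of the mean: std / sqrt(n), where n counts non-NA values.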
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If a Series is passed, its values will be used to determine the groups.
A label or list of labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose whether to include NA in the group keys by setting the
`dropna` parameter; the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If non-boolean or multiple values exist, it raises an exception, as
below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
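# A row is "valid" when every data column is non-null, so AND the per-column
# isNotNull predicates together to build the filter.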
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
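# tail(1) (available in PySpark >= 3.0, per the note above) fetches only the
# last matching row instead of collecting the whole frame.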
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0 since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Handle floating point types specially because Spark's count treats NaN as a valid value,
# whereas pandas' count doesn't include NaN.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
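# Added note: a hedged example of the behaviour handled above. For a float
# column holding [1.0, float("nan")], F.count(col) returns 2 because Spark
# counts NaN as a value, while F.count(F.nanvl(col, SF.lit(None))) returns 1,
# matching pandas' count.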
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
skew
|
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and can cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
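# Added sketch of the window alluded to in the note above (an assumption,
# since Series._cum is defined elsewhere): an ordered, unpartitioned running
# window along the lines of
#   from pyspark.sql import Window
#   w = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
#       Window.unboundedPreceding, Window.currentRow)
#   F.min(scol).over(w)
# The missing partitionBy is what funnels all rows into one partition.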
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and can cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and can cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later.
# Windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and can cause serious
performance degradation. Avoid this method on very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with an
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
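# Added worked note on the exp-log emulation referenced above (assuming
# strictly positive inputs, where the identity holds exactly):
#   prod(x_1, ..., x_n) == exp(log(x_1) + ... + log(x_n))
# e.g. 2 * 3 * 4 == exp(log(2) + log(3) + log(4)) == 24.0 (up to rounding).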
# TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()`",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
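# Added mini-example of the Counter-based tally above: for dtypes
# [int64, int64, object], Counter(...) yields {'int64': 2, 'object': 1},
# which pd.Series(dict(...)) turns into the returned dtype-count Series.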
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the data.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They take higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
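# Added note: the branch above unwraps a single kwarg named "options" holding
# a dict, so PySpark options may be passed either as **kwargs or bundled in
# one `options` dict.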
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# 0.23 seems not having 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
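# Added illustration (directory layout assumed, per Spark's usual output):
# with a `path`, Spark writes a directory rather than a single file, and
# `num_files=1` repartitions to a single part file, roughly:
#   foo.csv/
#       _SUCCESS
#       part-00000-<uuid>.csv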
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw ValueError if 'orient' is incorrect, since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They take higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("only orient='records' is supported for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
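# Added hedged example of the boolean cast above: booleans are averaged as
# 0/1 longs, so, assuming these semantics,
#   >>> ps.Series([True, False, True]).mean()  # doctest: +SKIP
#   0.6666666666666666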
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with an ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
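# Added sketch of the decomposition used in prod() above: for numeric input,
#   prod(x) == 0                               if any x == 0
#   prod(x) == sign * exp(sum(log(abs(x))))    otherwise,
# where sign is +1 for an even count of negatives and -1 for an odd count;
# e.g. (-2) * 3 == -1 * exp(log(2) + log(3)) == -6.0 (up to rounding).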
# MASKED: skew function (lines 1379-1433)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
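# Added usage note: percentile_approx trades accuracy for speed. With the
# default accuracy=10000 the relative rank error is bounded by 1/10000, and a
# larger value tightens it at extra cost, e.g. df.median(accuracy=100000)
# (per Spark's percentile_approx contract).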
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar(for Series) or Series(for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
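# Added worked check of the formula above (sem == std / sqrt(count)): for
# [1, 2, 3], std() == 1.0 and count == 3, so sem == 1.0 / sqrt(3)
# ~= 0.5773502691896258, matching the doctest output in the docstring.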
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean values or more than one value, it raises an exception
in all cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
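# Illustrative sketch (not part of the module): the Spark pattern behind
# first_valid_index, restated on a plain Spark DataFrame; `sdf`, `index_col`
# and `data_cols` are hypothetical placeholders.
def _first_valid_index_sketch(sdf, index_col, data_cols):
    from functools import reduce
    from pyspark.sql import functions as F
    # keep only rows where every data column is non-null, then take one row
    cond = reduce(lambda a, b: a & b, [F.col(c).isNotNull() for c in data_cols])
    rows = sdf.filter(cond).select(index_col).limit(1).collect()
    return rows[0][0] if rows else None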
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
NA is also counted as a period, again unlike pandas. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
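# Illustrative sketch (not part of the module): a hedged usage example of the
# API above; setting min_periods below the window size lets the first rows
# produce values instead of NA.
def _rolling_mean_sketch(psser):
    return psser.rolling(window=3, min_periods=1).mean()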
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/databricks/koalas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
NA is also counted as a period, again unlike pandas. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` was newly added in pandas 1.0.0, so it requires pandas >= 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
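# Illustrative sketch (not part of the module): the Window pattern behind a
# backward fill, restated in plain PySpark. Note the Window has no partitionBy,
# which is what forces a single partition. `order_col` is a hypothetical
# ordering column.
def _bfill_sketch(sdf, col, order_col):
    from pyspark.sql import Window, functions as F
    w = Window.orderBy(order_col).rowsBetween(Window.currentRow, Window.unboundedFollowing)
    # first non-null value at or after the current row
    return sdf.withColumn(col, F.first(F.col(col), ignorenulls=True).over(w))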
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
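# Illustrative sketch (not part of the module): the mirror image of the bfill
# sketch above; a forward fill takes the last non-null value at or before the
# current row. `order_col` is a hypothetical ordering column.
def _ffill_sketch(sdf, col, order_col):
    from pyspark.sql import Window, functions as F
    w = Window.orderBy(order_col).rowsBetween(Window.unboundedPreceding, Window.currentRow)
    return sdf.withColumn(col, F.last(F.col(col), ignorenulls=True).over(w))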
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
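# Illustrative sketch (not part of the module): why the nanvl trick above is
# needed. Spark's count treats NaN as a valid value, so NaN is first mapped to
# NULL, which count does skip. A hedged sketch on a hypothetical float column:
def _pandas_like_count_sketch(sdf, float_col):
    from pyspark.sql import functions as F
    return sdf.select(F.count(F.nanvl(F.col(float_col), F.lit(None)))).first()[0]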
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
| 1,379
| 1,433
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
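# Illustrative sketch (not part of the module): the cumulative pattern behind
# `_cum(F.min, ...)`, restated in plain PySpark as an unpartitioned, ordered
# Window growing from the first row to the current one. `order_col` is a
# hypothetical ordering column.
def _cummin_sketch(sdf, col, order_col):
    from pyspark.sql import Window, functions as F
    w = Window.orderBy(order_col).rowsBetween(Window.unboundedPreceding, Window.currentRow)
    return sdf.withColumn(col, F.min(col).over(w))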
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with an
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
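# Illustrative sketch (not part of the module): the exp(sum(log(...))) identity
# behind cumprod, checked in plain Python for positive inputs, which is exactly
# the restriction the note above states.
import math as _math_sketch
_values = [2.0, 3.0, 4.0]
_running_log = 0.0
_cumprods = []
for _v in _values:
    _running_log += _math_sketch.log(_v)
    _cumprods.append(_math_sketch.exp(_running_log))
# _cumprods is approx [2.0, 6.0, 24.0], matching the doctest above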
# TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using it internally for `DataFrame.info`.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrame.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well:
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an
ndarray of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through to PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
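# Illustrative sketch (not part of the module): a hedged usage summary of the
# two paths above. Without `path` the frame is collected to the driver and
# rendered by pandas; with `path` Spark writes `part-...` files into that
# directory.
#
#   psdf.to_csv()                                  # returns a CSV string
#   psdf.to_csv(path="/tmp/out_csv", num_files=1)  # writes one part file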
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses
`orient='records'` for its output. This behaviour might have to change in the
near future. Note that NaN's and None will be converted to null and datetime
objects will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw ValueError with an incorrect 'orient' since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through to PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
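# Illustrative sketch (not part of the module): the BooleanType branch above,
# restated in plain PySpark. Booleans are cast to long, so the average is the
# fraction of True values. `bool_col` is a hypothetical column name.
def _bool_mean_sketch(sdf, bool_col):
    from pyspark.sql import functions as F
    return sdf.select(F.mean(F.col(bool_col).cast("long"))).first()[0]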
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
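# pandas-compat defaults: column-wise (axis=0) reductions restrict to numeric
# columns when numeric_only is unspecified; for row-wise (axis=1) reductions,
# numeric_only=True is treated as unset.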
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with an
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
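# Spark has no product aggregate, so emulate it: any zero forces the
# result to 0; the sign comes from the parity of the negative-value
# count; the magnitude is exp(sum(log(abs(x)))).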
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None` and `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index', counts are generated for each column. If 1 or 'columns', counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
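# ddof=0 -> population standard deviation (stddev_pop, divisor N);
# ddof=1 -> sample standard deviation (stddev_samp, divisor N - 1).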
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
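# ddof=0 -> population variance (var_pop);
# ddof=1 -> unbiased sample variance (var_samp, divisor N - 1).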
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximate median based upon
approximate percentile computation, because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
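# percentile_approx gives an approximate median; its relative error is
# bounded by 1.0 / accuracy.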
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar(for Series) or Series(for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
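# Standard error of the mean: std / sqrt(number of non-NA values).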
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean or multiple values, it raises an exception in all
cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
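# DataFrame.tail (available in PySpark >= 3.0) fetches just the last matching
# row instead of collecting the whole filtered frame.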
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size.
Also unlike pandas, NA values count toward the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
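Examples
--------
A minimal sketch; the expected output assumes parity with pandas and is left
unverified here, hence the skip directive.
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s.rolling(2).sum() # doctest: +SKIP
0 NaN
1 7.0
2 8.0
3 7.0
4 8.0
dtype: float64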
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# For the 'axis' implementation, refer to https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size.
Also unlike pandas, NA values count toward the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
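Examples
--------
A minimal sketch; the expected output assumes parity with pandas and is left
unverified here, hence the skip directive.
>>> s = ps.Series([1, 2, 3])
>>> s.expanding(2).sum() # doctest: +SKIP
0 NaN
1 3.0
2 6.0
dtype: float64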
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
kurtosis
|
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column that behaves similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with an
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although pandas >= 1.0.0 removed this API, we keep it as deprecated
# since we use it internally for `DataFrame.info`.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as well:
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
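# When `num_files` is set, repartition first so the write produces exactly
# that many part files.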
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note that NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw ValueError if 'orient' is incorrect, since the others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These are passed through as PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They have a higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates product with the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, returns an empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
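The ``exp(sum(log(...)))`` emulation mentioned in the note above can be
sketched in plain Python; sign and zero handling, as done internally, is
omitted for brevity, and ``product_via_logs`` is a hypothetical helper shown
only for illustration:
>>> import math
>>> def product_via_logs(values):
...     return math.exp(sum(math.log(abs(v)) for v in values))
>>> round(product_via_logs([1, 2, 3, 4, 5]))  # doctest: +SKIP
120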
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
# MASKED: kurtosis function (lines 1435-1490)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None` and `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index', counts are generated for each column. If 1 or 'columns', counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
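Internally the approximation relies on Spark's ``percentile_approx``. A rough
sketch of the equivalent call on a plain Spark DataFrame (``sdf`` here is a
hypothetical Spark DataFrame with a numeric column ``a``):
>>> from pyspark.sql import functions as F  # doctest: +SKIP
>>> sdf.select(F.percentile_approx("a", 0.5, 10000)).show()  # doctest: +SKIP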
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
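The standard error is the standard deviation divided by the square root of the
number of observations, so the result above can be reproduced manually as a
sanity check:
>>> psser.std() / 3 ** 0.5  # doctest: +SKIP
0.5773502691896258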
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: the `by` argument only supports the grouping name, and `as_index` only, for now.
# Documentation should be updated when more is supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting the dropna parameter;
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. It raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If non-boolean or multiple values exist, it raises an exception in all
cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
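# A row is "valid" only when every data column is non-null; AND together the
# per-column isNotNull predicates to express that.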
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: the 'center', 'win_type', 'on' and 'axis' parameters should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: the 'center' and 'axis' parameters should be implemented.
# For 'axis' implementation, refer to https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
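# truncate only makes sense on a sorted index; either a monotonically
# increasing or a monotonically decreasing index is accepted.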
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown()` is only supported with pandas >= 1.0.0, where it was added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
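# Hedged note on the delegation pattern above (an assumption about the
# internal helper, not documented here): `validate_arguments_and_invoke_function`
# presumably checks that every captured argument is either supported by the
# pandas counterpart or left at its default, and then invokes the pandas
# function on the driver-side object returned by `_to_internal_pandas()`.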
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
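# Hedged sketch of the "unpartitioned Window" the notes above warn about.
# This is not the actual internal code; the column names (`row_order`,
# `value`) are hypothetical. Conceptually, a forward fill in raw PySpark
# looks like:
#
#   from pyspark.sql import Window, functions as F
#   w = (Window.orderBy("row_order")
#        .rowsBetween(Window.unboundedPreceding, Window.currentRow))
#   sdf = sdf.withColumn("filled", F.last("value", ignorenulls=True).over(w))
#
# Because the Window has no partitionBy, Spark must sort and scan all rows in
# a single partition, which is the performance caveat documented above.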
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
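# Illustration of the nan/null mismatch `_count_expr` works around (the data
# here is hypothetical): pandas' count() skips NaN, while Spark's count()
# skips only NULL and treats NaN as a valid value.
#
#   >>> pd.Series([1.0, float("nan")]).count()
#   1
#
# A plain F.count() over the same data would return 2, hence the
# F.nanvl(col, lit(None)) rewrite before counting.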
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
| 1,435
| 1,490
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
.. note:: unlike pandas', pandas-on-Spark's cumprod emulates the cumulative
product via the ``exp(sum(log(...)))`` trick. Therefore, it only works for
positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
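# Hedged sketch of the ``exp(sum(log(...)))`` emulation mentioned in the note
# above, in plain NumPy rather than the actual Spark code path:
#
#   >>> import numpy as np
#   >>> x = np.array([2.0, 3.0, 4.0])
#   >>> np.exp(np.cumsum(np.log(x)))  # cumulative product, positive inputs only
#   array([ 2.,  6., 24.])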
# TODO: Although this was removed in pandas >= 1.0.0, we keep it as deprecated
# since we use it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()`.",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the data.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : int, optional
The number of files to be written in the `path` directory when `path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through to PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have a 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
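# When `header` is a list, it provides aliases for the column names: select
# each data column and rename it to its alias, then write a normal header row.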
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses
`orient='records'` for its output. This behaviour might have to change in
the near future. Note that NaN and None will be converted to null and
datetime objects will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Raises ValueError with an incorrect 'orient' since others are not
list-like. It must always be True for now.
orient : str, default 'records'
It must always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : int, optional
The number of files to be written in the `path` directory when `path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through to PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
They have a higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("only orient='records' is supported for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further
data without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates the product via the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
prod : scalar for a Series, and a Series for a DataFrame.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
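# Descriptive note: the numeric product is emulated in three steps --
# (1) any zero in the column forces the product to 0,
# (2) the sign comes from the parity of the count of negative values,
# (3) the magnitude is recovered via exp(sum(log(abs(x)))).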
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None` and `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximate
median based upon approximate percentile computation, because computing the
median across a large dataset is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
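# Editor's note (hedged sketch, not in the original source): `accuracy`
# bounds the relative error of the approximate percentile at 1.0 / accuracy,
# so a smaller value trades precision for speed, e.g.:
# >>> df['a'].median(accuracy=100) # coarser but faster approximation # doctest: +SKIP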
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar(for Series) or Series(for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
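# Editor's note: sem is std / sqrt(N), as the helpers above compute. For
# psdf.a = [1, 2, 3], std with ddof=1 is 1.0 and N is 3, so
# sem() == 1.0 / 3 ** 0.5 == 0.5773..., matching the doctest output above.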
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting the dropna
parameter; the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly one element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If the element is non-boolean, or if multiple values exist, an exception is
raised in all the cases below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_frame()
else:
raise TypeError("bool() expects DataFrame or Series; however, got [%s]" % (self,))
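# Fetch at most two rows: one is enough to read the value, and a second is
# enough for pandas' bool() to raise the ambiguity error, so the whole
# dataset never has to be collected to the driver.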
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
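# A row is valid only when every data column is non-null, so AND the
# per-column isNotNull() predicates into one filter condition.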
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
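# Spark's DataFrame.tail is available only in PySpark >= 3.0, which is why
# the Notes section above calls out that version requirement.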
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Unlike pandas, NA is also counted as a period. This might change
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
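# Hedged usage sketch (not part of the original docstring), assuming the
# fixed-size window semantics described above with min_periods defaulting
# to the window size:
# >>> ps.Series([1, 2, 3, 4]).rolling(2).sum() # doctest: +SKIP
# 0 NaN
# 1 3.0
# 2 5.0
# 3 7.0
# dtype: float64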
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer to https://github.com/databricks/koalas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Unlike pandas, NA is also counted as a period. This might change
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
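# Hedged usage sketch (not part of the original docstring); with
# min_periods=2 the first window is too small to produce a value:
# >>> ps.Series([1, 2, 3, 4]).expanding(2).sum() # doctest: +SKIP
# 0 NaN
# 1 3.0
# 2 6.0
# 3 10.0
# dtype: float64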
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with an index of sorted integers:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with an index of sorted strings:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` is supported only with pandas >= 1.0.0, since it was newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` is only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
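# Illustration (editor's note): F.nanvl(col, lit(None)) maps NaN to null, so
# for a double column holding [1.0, NaN, null] Spark's count returns 2 on the
# raw column but 1 after nanvl, matching pandas' count semantics.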
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
min

Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
.. note:: unlike pandas', pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
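# Worked check of the exp(sum(log(...))) trick (editor's note): for column
# A = [2.0, 3.0, 4.0], exp(log(2) + log(3)) == 6.0 and
# exp(log(2) + log(3) + log(4)) == 24.0, matching the doctest above; a zero
# or negative input would make log undefined, hence the Raises note.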
# TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using it for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well:
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 seems not to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note that NaN and None will be converted to null, and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Throws a ValueError for an incorrect 'orient', since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
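# Materialize the data as pandas on the driver and delegate to pandas' own
# writer; the helper validates that only arguments supported by the pandas
# counterpart were passed.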
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
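# Booleans are averaged as 0/1 by casting to long first; any other
# non-numeric type raises, mirroring the numeric_only contract above.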
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
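# Spark's sum over an all-null column is null; coalesce it to 0 so that,
# as in pandas, an all-NA sum with the default min_count=0 yields 0.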
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product by the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
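# The product is emulated piecewise (see the docstring note): any zero in
# the column forces the result to 0; the sign comes from the parity of the
# count of negative values; the magnitude is exp(sum(log(abs(x)))).
# Booleans reduce to 0 if any False is present, else 1 (nulls are
# coalesced to True).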
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher's definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
# MASKED: min function (lines 1494-1547)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index', counts are generated for each column. If 1 or 'columns', counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation, because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
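# percentile_approx(col, 0.5, accuracy) yields an approximate median; the
# relative error is roughly 1.0 / accuracy, as documented above.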
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
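# Standard error of the mean: std / sqrt(n), where n is the non-NA count
# computed by _count_expr.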
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean values or multiple values, it raises an exception in all
cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
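# head(2) bounds how much data is pulled to the driver; pandas' bool()
# then raises if more than one element is present.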
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
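# DataFrame.tail is only available in PySpark >= 3.0, hence the note in
# the docstring above.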
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
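Examples
--------
A minimal sketch; the aggregation applied to the window determines the result:
>>> ps.Series([1, 2, 3, 4]).rolling(2).sum()  # doctest: +SKIP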
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
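Examples
--------
A minimal sketch; the aggregation applied to the window determines the result:
>>> ps.Series([1, 2, 3]).expanding().sum()  # doctest: +SKIP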
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
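# Inspecting at most two columns (and, below, two rows) is enough to
# decide squeezability without counting everything.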
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
| 1,494
| 1,547
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
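# The emulation noted above, in plain numbers. A minimal sketch of the
# exp(sum(log(...))) trick on a Python list, ignoring the NA and sign
# handling the real implementation adds:
#
#     import math
#     xs = [2.0, 3.0, 4.0]
#     cumprod = [math.exp(sum(math.log(x) for x in xs[:i + 1]))
#                for i in range(len(xs))]
#     # cumprod == [2.0, 6.0, 24.0] up to floating-point rounding
#
# Since log() is undefined for values <= 0, non-positive inputs raise, as
# documented in the Raises section above.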
# TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
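# For reference, the tuple dispatch above means
#     df.pipe((f, "data"), x=1)
# is evaluated as f(x=1, data=df), so `f` may accept the frame at any keyword
# position; the ValueError guards against supplying that keyword twice.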
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have a 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
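# For orientation, the writer chain assembled above corresponds roughly to
# the following hand-written PySpark (a sketch, not the exact code path;
# "/tmp/out" is a placeholder path):
#
#     (sdf.repartition(1)                      # only applied when num_files is set
#         .write.mode("overwrite")
#         .option("sep", ",")
#         .option("header", True)
#         .csv("/tmp/out"))                    # Spark writes part-... files into the dir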
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses
`orient='records'` for its output. This behaviour might have to change in the
near future. Note that NaN's and None will be converted to null, and datetime
objects will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Raises a ValueError for other 'orient' values since they are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas
# and use the 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
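# Analogous raw PySpark sketch for the path branch above ("/tmp/out" is a
# placeholder path):
#
#     sdf.write.mode("overwrite").option("compression", "gzip").json("/tmp/out")
#
# As with to_csv, Spark writes a directory of part-... files rather than a
# single JSON file.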
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
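# Coalescing with 0 makes an all-NA column sum to 0, matching pandas'
# default behaviour with min_count=0; the min_count threshold itself is
# assumed to be applied in _reduce_for_stat_function, which receives it
# below.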
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
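# Worked example of the sign/zero/log trick in prod() above: for [-2, 3, -4]
# there are no zeros, two negatives give sign = +1, and
# exp(log 2 + log 3 + log 4) = 24, so the result is +24. A single zero
# short-circuits the whole product to 0 via the num_zeros branch, and the
# final coalesce yields 1 for an empty or all-NA column, matching pandas'
# min_count=0 default.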
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
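# Note: F.skewness is Spark's built-in estimator and is not guaranteed to
# match pandas' bias-adjusted skew bit-for-bit, which is presumably why the
# doctest above is marked +SKIP; the same caveat applies to F.kurtosis in
# kurtosis() below.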
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index', counts are generated for each column. If 1 or 'columns', counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation, because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
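# Quick sanity check of sem = std / sqrt(count): for column `a` = [1, 2, 3]
# above, std is 1.0 and the count is 3, so sem = 1.0 / 3 ** 0.5 ~= 0.5774,
# matching the doctest output.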
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting the dropna parameter;
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
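# Note on the normalization above: every non-Series key is wrapped into a
# tuple (e.g. "a" -> ("a",)) because internal column labels are tuples,
# which keeps single-level and MultiIndex columns on a single code path
# (an observation about the code above, not documented behaviour).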
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If the value is non-boolean or multiple values exist, it raises an exception in all
cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For an empty Series or DataFrame, return None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For an empty Series or DataFrame, return None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
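# Unlike first_valid_index, which can limit(1) from the top, this relies on
# DataFrame.tail, which is only available in PySpark >= 3.0, hence the
# version note in the docstring above.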
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# For the 'axis' implementation, refer to https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Also unlike pandas, NA is counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
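# Normalize the "rows" alias to "index" before validating the axis.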
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
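# head(2) limits the scan: retrieving at most two rows is enough to tell whether there is exactly one value.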
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
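# nanvl maps NaN to NULL so that F.count skips it, matching pandas' count semantics.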
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
max
|
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later;
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into
a single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
.. note:: unlike pandas', pandas-on-Spark's cumprod emulates the cumulative product
via the ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although pandas >= 1.0.0 removed this API, we keep it as deprecated
# since we use it internally for `DataFrame.info`.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well.
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's CSV options. Check
the available options in PySpark's API documentation for spark.write.csv(...).
They have higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series.to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
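# When 'header' is a list, its entries are used as aliases for the selected columns.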
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
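# Each Spark partition is written as one 'part-...' file, so repartitioning controls the number of output files.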
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note that NaN and None will be converted to null, and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Throws ValueError for an incorrect 'orient', since other orients are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed through as PySpark's JSON options. Check
the available options in PySpark's API documentation for `spark.write.json(...)`.
They have a higher priority and overwrite all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
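# As in to_csv, repartitioning controls the number of output 'part-...' files.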
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
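# Spark's sum yields NULL over all-null input; coalesce to 0 to match pandas' default (min_count is applied by the caller).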
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark's emulates product by ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
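# log() is undefined for non-positive values, so emulate the product as exp(sum(log(abs(x)))), tracking zeros and the overall sign separately.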
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
# MASKED: max function (lines 1549-1602)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index', counts are generated for each column. If 1 or 'columns',
counts are generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
        self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
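    # Illustrative note (not part of the original source): `ddof` selects
    # between Spark's sample and population aggregates, which differ by the
    # usual sqrt(n / (n - 1)) factor. A minimal sketch, assuming a Spark
    # session (values match the docstring above):
    #
    # >>> import pyspark.pandas as ps
    # >>> ps.Series([1.0, 2.0, 3.0]).std()        # F.stddev_samp, ddof=1
    # 1.0
    # >>> ps.Series([1.0, 2.0, 3.0]).std(ddof=0)  # F.stddev_pop
    # 0.816496580927726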
def var(
        self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
        self, axis: Optional[Axis] = None, numeric_only: Optional[bool] = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
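    # Illustrative note (not part of the original source): `median` delegates
    # to Spark's `percentile_approx` at p=0.5, so `accuracy` trades precision
    # for speed (relative error is roughly 1.0 / accuracy). A minimal sketch,
    # assuming a Spark session; on tiny data the approximation is exact:
    #
    # >>> import pyspark.pandas as ps
    # >>> ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy=100)  # doctest: +SKIP
    # 25.0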
def sem(
        self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
        scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
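    # Illustrative note (not part of the original source): `sem` composes the
    # two building blocks above as std / sqrt(count). A minimal sketch of that
    # identity, assuming a Spark session (value matches the docstring above):
    #
    # >>> import math
    # >>> import pyspark.pandas as ps
    # >>> s = ps.Series([1, 2, 3])
    # >>> s.std() / math.sqrt(s.count())  # same as s.sem()
    # 0.5773502691896258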
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
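    # Illustrative note (not part of the original source): the series op above
    # leaves BooleanType columns untouched and only wraps numeric columns in
    # `F.abs`. A minimal sketch, assuming a Spark session:
    #
    # >>> import pyspark.pandas as ps
    # >>> ps.Series([True, False]).abs()  # doctest: +SKIP
    # 0     True
    # 1    False
    # dtype: bool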
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
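    # Illustrative note (not part of the original source): the normalization
    # above accepts a Series, a label, a tuple label, or a list of these, and
    # rejects anything that is not one-dimensional. A minimal sketch of the
    # rejection path, assuming a Spark session:
    #
    # >>> import pyspark.pandas as ps
    # >>> df = ps.DataFrame({'a': [1, 2], 'b': [3, 4]})
    # >>> df.groupby(by=df)  # doctest: +SKIP
    # Traceback (most recent call last):
    #   ...
    # ValueError: Grouper for 'DataFrame' not 1-dimensional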
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
        This must be a boolean scalar value, either True or False. Raises a ValueError if
        the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If non-boolean or multiple values exist, it raises an exception in all
        cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
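    # Illustrative note (not part of the original source): the null filter
    # above is built by AND-ing `isNotNull` across every data column with
    # `reduce`. A minimal sketch of the same reduction over plain PySpark
    # columns, assuming a Spark session:
    #
    # >>> from functools import reduce
    # >>> from pyspark.sql import functions as F
    # >>> cols = [F.col("a"), F.col("b")]
    # >>> cond = reduce(lambda x, y: x & y, [c.isNotNull() for c in cols])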
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
        .. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size,
            and NA is also counted as a period. This might be changed
            in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
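    # Illustrative note (not part of the original source): because
    # `min_periods` defaults to the window size and behaves as a fixed
    # threshold, leading rows that cannot fill the window come back as NaN.
    # A minimal sketch, assuming a Spark session:
    #
    # >>> import pyspark.pandas as ps
    # >>> ps.Series([1, 2, 3, 4]).rolling(2).sum()  # doctest: +SKIP
    # 0    NaN
    # 1    3.0
    # 2    5.0
    # 3    7.0
    # dtype: float64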
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
        .. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size,
            and NA is also counted as a period. This might be changed
            in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
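    # Illustrative note (not part of the original source): `expanding` grows
    # the window from the start of the axis, so with min_periods=1 every row
    # gets a value. A minimal sketch, assuming a Spark session:
    #
    # >>> import pyspark.pandas as ps
    # >>> ps.Series([1, 2, 3]).expanding(1).sum()  # doctest: +SKIP
    # 0    1.0
    # 1    3.0
    # 2    6.0
    # dtype: float64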
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
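    # Illustrative note (not part of the original source): for a monotonically
    # decreasing index, `truncate` swaps the slice bounds (`loc[after:before]`),
    # so `before`/`after` keep their meaning in index-value terms. A minimal
    # sketch, assuming a Spark session:
    #
    # >>> import pyspark.pandas as ps
    # >>> s = ps.Series([10, 20, 30], index=[3, 2, 1])
    # >>> s.truncate(1, 2)  # doctest: +SKIP
    # 2    20
    # 1    30
    # dtype: int64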
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
        # `to_markdown` requires pandas >= 1.0.0, where it was newly added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.
.. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying a partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.
.. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying a partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
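    # Illustrative note (not part of the original source): `_count_expr` wraps
    # float columns in `F.nanvl(col, NULL)` so that NaN, which Spark's count
    # would otherwise include, is excluded the way pandas excludes it. A
    # minimal sketch, assuming a Spark session; both NaN and None are skipped:
    #
    # >>> import pyspark.pandas as ps
    # >>> ps.Series([1.0, float("nan"), None]).count()  # doctest: +SKIP
    # 1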
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
def max(
        self, axis: Optional[Axis] = None, numeric_only: Optional[bool] = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
| 1549
| 1602
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column that behaves similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
        .. note:: unlike pandas', pandas-on-Spark emulates the cumulative product with the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
        Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
        By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
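    # Illustrative note (not part of the original source): the
    # ``exp(sum(log(...)))`` trick mentioned above turns a product into a sum
    # that Spark's Window can accumulate, which is also why non-positive
    # inputs are rejected. A minimal sketch of the identity in plain Python:
    #
    # >>> import math
    # >>> round(math.exp(sum(math.log(v) for v in [2.0, 3.0, 4.0])), 6)
    # 24.0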
    # TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
        warnings.warn(
            "`get_dtype_counts` has been deprecated and will be "
            "removed in a future version. For DataFrames use "
            "`.dtypes.value_counts()`.",
            FutureWarning,
        )
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
        A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS's properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
            These kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
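# A minimal sketch of what the branch above builds when `path` is given
# (illustration only; the directory path below is hypothetical):
#
#     sdf = df.to_spark()
#     (sdf.repartition(1)                  # num_files=1
#         .write.mode("overwrite")         # mode="overwrite"
#         .option("sep", ",")
#         .option("header", True)
#         .format("csv")
#         .save("/tmp/example_csv_dir"))   # Spark writes part-... files here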
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses
`orient='records'` for its output. This behaviour might have to change in the
near future.
Note that NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
A ValueError is raised for any other 'orient' since the others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : int, optional
The number of files to be written in the `path` directory when
`path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col : str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
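# A minimal usage sketch (illustration only; the directory path is hypothetical):
# with `path=None` the data is collected to the driver and serialized with
# pandas using orient='records'; with a path, Spark writes line-delimited JSON
# part-... files into the directory.
#
#     df.to_json()                                  # '[{"a":1},{"a":2}]'
#     df.to_json(path="/tmp/example_json_dir",
#                num_files=1, compression="gzip")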
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
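# Note on the reducer above: F.coalesce(F.sum(...), SF.lit(0)) makes the sum of
# an all-NA column 0 rather than null, matching pandas' default (min_count=0).
# A quick illustration, assuming a Series `s` of all NaN:
#
#     s = ps.Series([np.nan, np.nan])
#     s.sum()             # 0.0, because of the coalesce with 0
#     s.sum(min_count=1)  # nan, since fewer than min_count valid values exist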
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates product by the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
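# The exp(sum(log(abs(x)))) identity above recovers the product's magnitude;
# the sign comes from the parity of negative values, and any zero short-circuits
# the result to 0. A plain-Python sanity check of the same identity
# (illustration only):
#
#     import math
#     xs = [2.0, -3.0, 4.0]
#     sign = 1 if sum(x < 0 for x in xs) % 2 == 0 else -1
#     magnitude = math.exp(sum(math.log(abs(x)) for x in xs))
#     sign * magnitude    # approximately -24.0 (== 2 * -3 * 4)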
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None` and `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
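# sem above is computed as std / sqrt(count). A quick numeric check
# (illustration only): for [1, 2, 3], std with ddof=1 is 1.0 and count is 3,
# so sem == 1.0 / 3 ** 0.5 == 0.5773502691896258, matching the doctest above.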
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0
Can only be set to 0 or 'index' at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
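# Key normalization above wraps scalar and tuple labels into 1-tuples so that
# downstream code can treat all grouping keys uniformly. A sketch of the
# resulting `new_by` values (illustration only):
#
#     df.groupby("b")            # new_by == [("b",)]
#     df.groupby(("x", "a"))     # new_by == [("x", "a")]  (MultiIndex column)
#     df.groupby([df.b, "a"])    # mix of Series and label keys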
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If there are non-boolean or multiple values, it raises an exception in all
cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
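# head(2) above fetches at most two rows, so pandas' bool() can still raise the
# "truth value is ambiguous" error for multi-row data without collecting the
# whole frame to the driver.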
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size,
and NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size,
and NA is also counted as a period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` is only supported in pandas >= 1.0.0, where it was newly added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
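# Illustration of the NaN handling above (sketch only): Spark's F.count treats
# NaN as a valid value, while F.nanvl(col, F.lit(None)) rewrites NaN to null so
# that it is excluded, matching pandas' count.
#
#     F.count(F.nanvl(F.col("x"), F.lit(None)))  # pandas-like count for "x"
#     F.count(F.col("x"))                        # would also count NaN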
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class for DataFrame/Column to behave similarly to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
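# A minimal sketch of the note above (an illustration of the unpartitioned-window
# idea, not the actual `_cum` helper; `order_col` and `value_col` are placeholder
# column names):
# from pyspark.sql import Window
# w = Window.orderBy(order_col).rowsBetween(Window.unboundedPreceding, Window.currentRow)
# running_min = F.min(value_col).over(w)  # no partitionBy, hence a single partition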
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
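# The emulation relies on the identity prod(x_i) == exp(sum(log(x_i))) for x_i > 0,
# which turns a multiplicative scan into Spark's additive window sum. A plain-Python
# self-check of the identity (a sketch, independent of Spark):
# import math
# xs = [2.0, 3.0, 4.0]
# assert abs(math.exp(sum(math.log(x) for x in xs)) - 24.0) < 1e-9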
# TODO: Although pandas >= 1.0.0 removed this API, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
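# Accept both flattened keywords (**options) and a single `options={...}` dict.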
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# 0.23 seems not having 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw a ValueError if 'orient' is incorrect, since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, returns an empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
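# prod is computed as sign * exp(sum(log(|x|))): any zero forces the result to 0,
# the sign is +1 when the count of negative values is even and -1 otherwise, and
# integral inputs are rounded back to LongType since exp/log operate on doubles.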
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
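# The median is approximated via percentile_approx at the 0.5 quantile; the
# relative error is roughly 1.0 / accuracy, as noted in the docstring.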
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
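# Standard error of the mean: sem = std / sqrt(N), where N is the non-NA count.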
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in the group keys or not by setting the `dropna`
parameter; the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
# MASKED: bool function (lines 2255-2299)
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For an empty Series or DataFrame, return None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For an empty Series or DataFrame, return None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
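# A minimal usage sketch of rolling(), illustration only; assumes a running
# SparkSession. With min_periods=1 the first row aggregates a single
# observation instead of producing NA.
import pyspark.pandas as ps

s = ps.Series([4.0, 3.0, 5.0, 2.0])
print(s.rolling(window=2).sum())                 # first value is NaN
print(s.rolling(window=2, min_periods=1).sum())  # first value is 4.0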
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/databricks/koalas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
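# A minimal usage sketch of expanding(), illustration only; assumes a running
# SparkSession. Each row aggregates everything seen so far; min_periods=3
# leaves the first two rows as NA.
import pyspark.pandas as ps

s = ps.Series([1.0, 2.0, 3.0, 4.0])
print(s.expanding().sum())               # 1.0, 3.0, 6.0, 10.0
print(s.expanding(min_periods=3).sum())  # NaN, NaN, 6.0, 10.0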
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown()` is only supported with pandas >= 1.0.0 since it was newly added there.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
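# A minimal sketch of why the nanvl() guard above matters, illustration only;
# assumes an active SparkSession bound to `spark`. Spark's count() treats NaN
# as a valid value, while pandas' count() does not, so NaN is mapped to null
# first. The explicit cast to double only keeps the null literal typed.
from pyspark.sql import functions as F

sdf = spark.createDataFrame([(1.0,), (float("nan"),), (None,)], ["x"])
sdf.select(
    F.count("x").alias("spark_count"),  # 2: NaN is counted
    F.count(F.nanvl("x", F.lit(None).cast("double"))).alias("pandas_like"),  # 1
).show()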
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
-------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If the element is non-boolean or there are multiple values, it raises an
exception in all cases, as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_frame()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
| 2,255
| 2,299
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# windows other than unbounded ones are supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
.. note:: unlike pandas', pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
# TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
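# A minimal sketch of the replacement suggested in the warning above,
# illustration only; assumes a running SparkSession. `dtypes` is a plain
# pandas Series, so its value_counts() yields the same counts.
import pyspark.pandas as ps

df = ps.DataFrame({"str": ["a", "b"], "int1": [1, 2], "int2": [1, 2]})
print(df.dtypes.value_counts())  # int64: 2, object: 1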
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is None, just collect and use pandas' to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not seem to have the 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Will throw ValueError if 'orient' is incorrect, since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
It is specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is None, just collect and use pandas' to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved, it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark emulates the product with the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
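# A plain-Python sketch of the ``exp(sum(log(abs(...))))`` trick used by
# ``prod`` above, with the same sign and zero handling. Illustrative only;
# ``product_via_logs`` is a hypothetical helper, not part of this API.
import math

def product_via_logs(values):
    # Any zero factor makes the whole product zero (log(0) is undefined).
    if any(v == 0 for v in values):
        return 0
    # An odd number of negative factors flips the sign of the result.
    sign = -1 if sum(v < 0 for v in values) % 2 else 1
    return sign * math.exp(sum(math.log(abs(v)) for v in values))

assert round(product_via_logs([1, 2, 3, 4, 5])) == 120
assert round(product_via_logs([-2, 3])) == -6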
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None` and `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
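# A quick plain-Python sanity check (a sketch, using the standard library's
# ``statistics`` module) of the ddof mapping above: ddof=1 maps to
# stddev_samp (sample) and ddof=0 to stddev_pop (population), matching the
# docstring values for [1, 2, 3].
import statistics

assert statistics.stdev([1, 2, 3]) == 1.0                  # ddof=1
assert round(statistics.pstdev([1, 2, 3]), 6) == 0.816497  # ddof=0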
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas, the median in pandas-on-Spark is an approximate median based upon
approximate percentile computation, because computing the median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of the approximation. A larger value means better accuracy.
The relative error is bounded by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
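# A minimal sketch of the approximation behind ``median`` above, assuming
# PySpark >= 3.1 and a locally available SparkSession (both assumptions of
# this sketch): ``percentile_approx`` trades accuracy for speed, with
# relative error bounded by 1.0 / accuracy.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.range(1, 1001).withColumnRenamed("id", "v")
# accuracy=10000 mirrors the default used by median() above; the exact
# median of 1..1000 is 500.5, and the approximation lands close to it.
sdf.select(F.percentile_approx(F.col("v").cast("double"), 0.5, 10000)).show()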
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
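# A plain-Python check (a sketch) of the composition above: sem is std
# divided by the square root of the non-NA count, which is exactly how the
# inner ``sem`` helper combines ``std`` with ``Frame._count_expr``.
import statistics

data = [1, 2, 3]
n = len(data)
assert round(statistics.stdev(data) / n ** 0.5, 6) == 0.57735    # ddof=1
assert round(statistics.pstdev(data) / n ** 0.5, 6) == 0.471405  # ddof=0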
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows for a Series. Otherwise return the number of
rows times the number of columns for a DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting the dropna parameter;
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If non-boolean or multiple values exist, it raises an exception in all
cases, as shown below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
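# A plain-Python analogue (a sketch only) of the condition built above:
# reducing per-column isNotNull() masks with ``&`` keeps only rows in which
# every column is non-null; the first such row yields the first valid index.
from functools import reduce

not_null_masks = [[False, True, True], [False, False, True]]  # two columns
rows_all_valid = [reduce(lambda x, y: x and y, row) for row in zip(*not_null_masks)]
assert rows_all_valid == [False, False, True]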
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Unlike pandas, NA is also counted as a period. This might change
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# For the 'axis' implementation, refer to https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
Unlike pandas, NA is also counted as a period. This might change
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
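# Design note in code form: ``_has_single_row`` below is a hypothetical
# helper (not part of this API) sketching the head(2) pattern used above.
# Checking len(head(2)) == 1 answers "does this have exactly one row?"
# without triggering a full count over the distributed data.
def _has_single_row(psdf_or_psser) -> bool:
    return len(psdf_or_psser.head(2)) == 1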
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index:
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
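# A minimal illustration of why the nanvl wrapping above is needed,
# assuming a locally available SparkSession (an assumption of this sketch):
# Spark's count() treats NaN as a valid value, while pandas' count() does
# not, so NaN is first turned into NULL via nanvl.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(1.0,), (float("nan"),), (None,)], ["x"])
sdf.select(
    F.count("x").alias("spark_count"),                        # 2: NaN is counted
    F.count(F.nanvl("x", F.lit(None))).alias("pandas_like"),  # 1: NaN -> NULL
).show()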
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
__init__
|
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
|
#
# Autogenerated by Frugal Compiler (3.4.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
pass
class Client(f_BasePinger.Client, Iface):
# MASKED: __init__ function (lines 39-55)
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
return await self._methods['ping']([ctx])
async def _ping(self, ctx):
memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
oprot = self._protocol_factory.get_protocol(memory_buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
args = ping_args()
args.write(oprot)
oprot.writeMessageEnd()
response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
iprot = self._protocol_factory.get_protocol(response_transport)
iprot.read_response_headers(ctx)
_, mtype, _ = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):
def __init__(self, handler, middleware=None):
"""
Create a new Processor.
Args:
handler: Iface
"""
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Processor, self).__init__(handler, middleware=middleware)
self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
def __init__(self, handler, lock):
super(_ping, self).__init__(handler, lock)
async def process(self, ctx, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
ret = self._handler([ctx])
if inspect.iscoroutine(ret):
ret = await ret
except TApplicationException as ex:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", exception=ex)
return
except Exception as e:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
raise
async with self._lock:
try:
oprot.write_response_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
result.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
except TTransportException as e:
# catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
else:
raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
if exception is not None:
x = exception
else:
x = TApplicationException(type=ex_code, message=message)
oprot.write_response_headers(ctx)
oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
x.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
return x
class ping_args(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
def __init__(self, provider, middleware=None):
"""
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Client, self).__init__(provider, middleware=middleware)
middleware += provider.get_middleware()
self._methods.update({
'ping': Method(self._ping, middleware),
})
| 39
| 55
|
#
# Autogenerated by Frugal Compiler (3.4.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
pass
class Client(f_BasePinger.Client, Iface):
def __init__(self, provider, middleware=None):
"""
Create a new Client with an FServiceProvider containing a transport
and protocol factory.
Args:
provider: FServiceProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Client, self).__init__(provider, middleware=middleware)
middleware += provider.get_middleware()
self._methods.update({
'ping': Method(self._ping, middleware),
})
async def ping(self, ctx):
"""
Args:
ctx: FContext
"""
return await self._methods['ping']([ctx])
async def _ping(self, ctx):
memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
oprot = self._protocol_factory.get_protocol(memory_buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
args = ping_args()
args.write(oprot)
oprot.writeMessageEnd()
response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
iprot = self._protocol_factory.get_protocol(response_transport)
iprot.read_response_headers(ctx)
_, mtype, _ = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):
def __init__(self, handler, middleware=None):
"""
Create a new Processor.
Args:
handler: Iface
"""
if middleware and not isinstance(middleware, list):
middleware = [middleware]
super(Processor, self).__init__(handler, middleware=middleware)
self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
def __init__(self, handler, lock):
super(_ping, self).__init__(handler, lock)
async def process(self, ctx, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
ret = self._handler([ctx])
if inspect.iscoroutine(ret):
ret = await ret
except TApplicationException as ex:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", exception=ex)
return
except Exception as e:
async with self._lock:
_write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
raise
async with self._lock:
try:
oprot.write_response_headers(ctx)
oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
result.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
except TTransportException as e:
# catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
else:
raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
if exception is not None:
x = exception
else:
x = TApplicationException(type=ex_code, message=message)
oprot.write_response_headers(ctx)
oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
x.write(oprot)
oprot.writeMessageEnd()
oprot.get_transport().flush()
return x
class ping_args(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result(object):
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('ping_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
final_eos_is_already_included
|
Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
|
import logging
from typing import List, Union
from cactus.consensus.block_record import BlockRecord
from cactus.consensus.blockchain_interface import BlockchainInterface
from cactus.consensus.constants import ConsensusConstants
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.full_block import FullBlock
from cactus.types.header_block import HeaderBlock
from cactus.types.unfinished_block import UnfinishedBlock
from cactus.types.unfinished_header_block import UnfinishedHeaderBlock
from cactus.util.ints import uint64
log = logging.getLogger(__name__)
# MASKED: final_eos_is_already_included function (lines 17-51)
def get_block_challenge(
constants: ConsensusConstants,
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
genesis_block: bool,
overflow: bool,
skip_overflow_last_ss_validation: bool,
):
if len(header_block.finished_sub_slots) > 0:
if overflow:
# New sub-slot with overflow block
if skip_overflow_last_ss_validation:
# In this case, we are missing the final sub-slot bundle (it's not finished yet), however
# There is a whole empty slot before this block is infused
challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
challenge = header_block.finished_sub_slots[
-1
].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
# No overflow, new slot with a new challenge
challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
if genesis_block:
challenge = constants.GENESIS_CHALLENGE
else:
if overflow:
if skip_overflow_last_ss_validation:
# Overflow infusion without the new slot, so get the last challenge
challenges_to_look_for = 1
else:
# Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
# Which means no sub slots are omitted
challenges_to_look_for = 2
else:
challenges_to_look_for = 1
reversed_challenge_hashes: List[bytes32] = []
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
while len(reversed_challenge_hashes) < challenges_to_look_for:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
if curr.height == 0:
assert curr.finished_challenge_slot_hashes is not None
assert len(curr.finished_challenge_slot_hashes) > 0
break
curr = blocks.block_record(curr.prev_hash)
challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
return challenge
|
def final_eos_is_already_included(
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
sub_slot_iters: uint64,
) -> bool:
"""
Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
"""
if len(header_block.finished_sub_slots) > 0:
# We already have an included empty sub slot, which means the prev block is 2 sub slots behind.
return False
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
# We also check if curr is close to header_block, which means it's in the same sub slot
seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
while not curr.first_in_sub_slot and not curr.height == 0:
if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
seen_overflow_block = True
curr = blocks.block_record(curr.prev_hash)
if curr.first_in_sub_slot and seen_overflow_block:
# We have seen another overflow block in this slot (same as header_block), therefore there are no
# missing sub slots
return True
# We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
# the future
return False
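# A minimal sketch (not part of the cactus codebase) of the walk-back above,
# using hypothetical stand-ins for BlockRecord and BlockchainInterface. It
# assumes sub_slot_iters=100 and shows the "already included" (True) case: an
# overflow block infused fewer than sub_slot_iters // 2 iters before the new
# header block, inside the same sub slot.
from dataclasses import dataclass
from typing import Dict


@dataclass
class _FakeRecord:
    prev_hash: str
    height: int
    total_iters: int
    overflow: bool
    first_in_sub_slot: bool


class _FakeBlocks:
    def __init__(self, records: Dict[str, _FakeRecord]):
        self._records = records

    def block_record(self, header_hash: str) -> _FakeRecord:
        return self._records[header_hash]


def _demo_final_eos_already_included() -> None:
    sub_slot_iters = 100
    blocks = _FakeBlocks(
        {
            "b1": _FakeRecord("b0", 1, 900, False, True),  # opens the sub slot
            "b2": _FakeRecord("b1", 2, 940, True, False),  # overflow block
        }
    )
    header_total_iters = 970  # the new overflow header block, 30 iters after b2
    curr = blocks.block_record("b2")
    seen_overflow_block = curr.overflow and (header_total_iters - curr.total_iters < sub_slot_iters // 2)
    while not curr.first_in_sub_slot and not curr.height == 0:
        if curr.overflow and header_total_iters - curr.total_iters < sub_slot_iters // 2:
            seen_overflow_block = True
        curr = blocks.block_record(curr.prev_hash)
    # Same condition as the real function: another overflow block was seen in
    # this slot, so the missing sub slot was already included elsewhere.
    assert curr.first_in_sub_slot and seen_overflow_block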
| 17
| 51
|
import logging
from typing import List, Union
from cactus.consensus.block_record import BlockRecord
from cactus.consensus.blockchain_interface import BlockchainInterface
from cactus.consensus.constants import ConsensusConstants
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.full_block import FullBlock
from cactus.types.header_block import HeaderBlock
from cactus.types.unfinished_block import UnfinishedBlock
from cactus.types.unfinished_header_block import UnfinishedHeaderBlock
from cactus.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
sub_slot_iters: uint64,
) -> bool:
"""
Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
"""
if len(header_block.finished_sub_slots) > 0:
# We already have an included empty sub slot, which means the prev block is 2 sub slots behind.
return False
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
# We also check if curr is close to header_block, which means it's in the same sub slot
seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
while not curr.first_in_sub_slot and not curr.height == 0:
if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
seen_overflow_block = True
curr = blocks.block_record(curr.prev_hash)
if curr.first_in_sub_slot and seen_overflow_block:
# We have seen another overflow block in this slot (same as header_block), therefore there are no
# missing sub slots
return True
# We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
# the future
return False
def get_block_challenge(
constants: ConsensusConstants,
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
genesis_block: bool,
overflow: bool,
skip_overflow_last_ss_validation: bool,
):
if len(header_block.finished_sub_slots) > 0:
if overflow:
# New sub-slot with overflow block
if skip_overflow_last_ss_validation:
                # In this case, we are missing the final sub-slot bundle (it's not finished yet), however
                # there is a whole empty slot before this block is infused
challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
challenge = header_block.finished_sub_slots[
-1
].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
# No overflow, new slot with a new challenge
challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
if genesis_block:
challenge = constants.GENESIS_CHALLENGE
else:
if overflow:
if skip_overflow_last_ss_validation:
# Overflow infusion without the new slot, so get the last challenge
challenges_to_look_for = 1
else:
                    # Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
                    # which means no sub slots are omitted
challenges_to_look_for = 2
else:
challenges_to_look_for = 1
reversed_challenge_hashes: List[bytes32] = []
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
while len(reversed_challenge_hashes) < challenges_to_look_for:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
if curr.height == 0:
assert curr.finished_challenge_slot_hashes is not None
assert len(curr.finished_challenge_slot_hashes) > 0
break
curr = blocks.block_record(curr.prev_hash)
challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
return challenge
|
parse_args
|
Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f'f' "g"g" "i""i" 'j''j' k" "k l' l' mm n\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n
']
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # decode bytes on python 3 so the linesep split below works on str
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path doesn't raise UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
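# Sketch of the result (illustrative paths): for a client invocation this builds
# something like "/opt/storm/*:/opt/storm/lib-worker/*:/opt/storm/extlib/*:<extrajars>",
# i.e. wildcard directories joined with os.pathsep, so the JVM expands the jars itself.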
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # use % formatting so the offending output actually appears in the message
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
# MASKED: parse_args function (lines 256-273)
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
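# Assembled command sketch (illustrative paths/values):
#   java -client -Ddaemon.name= -Dstorm.options= -Dstorm.home=/opt/storm \
#        -Dstorm.log.dir=/opt/storm/logs -Djava.library.path=... -Dstorm.conf.file= \
#        -cp <classpath> org.apache.storm.command.ListTopologies
# On POSIX the non-fork path exec's the JVM in place (os.execvp), so nothing
# after it runs; the Windows path captures output instead because exec and
# quoting behave differently there.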
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars that are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, just as you would in a maven pom.
    Please append exclusion artifacts after the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is taken as the separator because a URL allows various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three parameters for the proxy:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use the --jars, --artifacts, --artifactRepositories, --proxyUrl, --proxyUsername, and --proxyPassword options.
    You normally want to pass these options since you need to set a data source for your sql, which in many cases is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag) make requested adjustments to the topology
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
Some of what you can change about a topology includes the number of requested workers (-n flag)
The number of executors for a given component (-e flag) the resources each component is
requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives the resources into a jar, uploads the jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
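# Illustrative call: parse_local_opts(("--local-ttl", "60", "arg1"))
# -> ("60", None, ["arg1"]); unrecognized tokens pass through in their
# original order.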
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
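# Illustrative peeling (hypothetical command line):
#   storm jar topo.jar com.acme.Topo -c ui.port=8081 --jars a.jar,b.jar
# leaves args_list = ["jar", "topo.jar", "com.acme.Topo"], config_list =
# ["ui.port=8081"], and jars_list = ["a.jar", "b.jar"]; main() below then
# dispatches on args_list[0].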
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
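# Usage sketch (illustrative, assuming `import re` from the surrounding script):
# the *.childopts config values consumed by the daemon launchers are plain
# strings, and parse_args turns them into an argv-style list while respecting
# quotes, e.g.
#   parse_args('-Xmx1024m "-XX:OnOutOfMemoryError=kill -9 %p"')
#   -> ['-Xmx1024m', '-XX:OnOutOfMemoryError=kill -9 %p']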
| 256
| 273
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # decode bytes on python 3 so the linesep split below works on str
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path doesn't raise UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # use % formatting so the offending output actually appears in the message
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars that are not included in the application jar, pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a Maven pom.
    Append exclusions to an artifact as a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have an '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so the dependency resolver can use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Be careful: this will add things to the classpath that are not on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since your SQL usually needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of the things you can change about a topology include the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
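# Worked example (hedged sketch of the behavior above):
#   parse_local_opts(("--local-ttl", "60", "--java-debug", "transport=dt_socket,address=8000", "MyTopo"))
#   -> ("60", "transport=dt_socket,address=8000", ["MyTopo"])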
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
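# Worked example (hedged sketch of the behavior above):
#   parse_jar_opts(("--storm-server-classpath", "arg1", "arg2"))
#   -> (True, ["arg1", "arg2"])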
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
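# Worked example (hedged sketch): given
#   parse_config_opts(["-c", "nimbus.host=n1", "--jars", "a.jar,b.jar", "jar", "t.jar", "Main"])
# the returned tuple is
#   (["nimbus.host=n1"], ["a.jar", "b.jar"], [], [], None, None, None, None,
#    ["jar", "t.jar", "Main"])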
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
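# Illustrative flow (hedged): `storm list -c nimbus.host=n1` reaches main() as
# sys.argv[1:] == ["list", "-c", "nimbus.host=n1"]; parse_config_opts strips the
# -c pair into config_list, leaving args == ["list"], so COMMANDS["list"] runs.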
if __name__ == "__main__":
main()
|
local
|
Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3 returns bytes from communicate(); decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path doesn't leave `ret` unbound
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
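# Example (hedged sketch): for an existing directory "/opt/storm/lib" this
# returns ["/opt/storm/lib/*"], letting the JVM expand the wildcard itself;
# for a plain file it returns the path unchanged in a one-element list.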
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
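# Example (hedged sketch): a client classpath is composed roughly as
#   <STORM_DIR>/* : <lib-worker>/* : <extlib>/* [: STORM_EXT_CLASSPATH] : extrajars...
# joined with os.pathsep and passed through normclasspath for cygwin.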
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
# MASKED: local function (lines 326-348)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars that are not included in the application jar, pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a Maven pom.
    Append exclusions to an artifact as a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have an '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so the dependency resolver can use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Be careful: this will add things to the classpath that are not on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since your SQL usually needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of the things you can change about a topology include the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
        extrajars=cppaths)
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
        extrajars=cppaths)
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll interval, component-id, stream-id and watch-item (emitted | transferred).
    By default:
        poll-interval is 4 seconds;
        all component-ids will be listed;
        stream-id is 'default';
        watch-item is 'emitted'.
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
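# Illustrative invocation (not part of the original script; the topology and
# component names are assumptions):
#
#   storm monitor mytopo -i 10 -m word-spout -s default -w emitted
#
# polls the 'word-spout' component of 'mytopo' every 10 seconds, watching the
# 'emitted' counter of its 'default' stream.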
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
    if command is not None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
    CONFIG_OPTS.extend(config_list)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
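# Worked example (illustrative; the option values are assumptions):
#
#   parse_local_opts(["--local-ttl", "60",
#                     "--java-debug", "transport=dt_socket,address=8000",
#                     "arg1", "arg2"])
#   -> ("60", "transport=dt_socket,address=8000", ["arg1", "arg2"])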
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
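# Worked example (illustrative):
#
#   parse_jar_opts(["--storm-server-classpath", "arg1", "arg2"])  # -> (True, ["arg1", "arg2"])
#   parse_jar_opts(["arg1", "arg2"])                              # -> (False, ["arg1", "arg2"])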
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
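# Worked example (illustrative; the option values are assumptions). Recognized
# options are consumed and everything else is passed through in order:
#
#   parse_config_opts(["-c", "nimbus.host=nh", "--jars", "a.jar,b.jar",
#                      "jar", "topo.jar", "com.example.Main"])
#   -> (["nimbus.host=nh"],                       # config_list
#       ["a.jar", "b.jar"],                       # jars_list
#       [], [],                                   # artifacts, artifact repositories
#       None, None, None, None,                   # maven local repo dir, proxy url/user/password
#       ["jar", "topo.jar", "com.example.Main"])  # args_list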
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
    # guard against invocations that supply only options and no command
    if not args:
        print_usage()
        sys.exit(-1)
    COMMAND = args[0]
    ARGS = args[1:]
    (COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3: communicate() returns bytes; decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
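# Illustrative storm_env.ini (the keys and values are assumptions about a
# typical setup); every entry in the [environment] section is exported into
# os.environ before the JVM is launched:
#
#   [environment]
#   JAVA_HOME=/usr/lib/jvm/java-8-openjdk
#   STORM_JAR_JVM_OPTS=-Xmx1024m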
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR is None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml")):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
    if os.path.isdir(adir):
        files = os.listdir(adir)
    elif os.path.exists(adir):
        # a single file: return it directly instead of joining it onto itself
        return [adir] if adir.endswith(".jar") else []
    else:
        files = []
    ret = []
    for f in files:
        if f.endswith(".jar"):
            ret.append(os.path.join(adir, f))
    return ret
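# Worked example (illustrative paths): a directory yields its *.jar entries,
# a single existing jar file yields itself, and a missing path yields []:
#
#   get_jars_full("lib-tools/sql/runtime")  # -> ["lib-tools/sql/runtime/x.jar", ...]
#   get_jars_full("extra.jar")              # -> ["extra.jar"] (if the file exists)
#   get_jars_full("no/such/path")           # -> []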
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # initialize so a missing path returns [] instead of raising UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
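# Worked example (illustrative paths): directories become JVM classpath
# wildcards, plain files pass through, missing paths disappear:
#
#   get_wildcard_dir("/opt/storm/lib")    # -> ["/opt/storm/lib/*"]
#   get_wildcard_dir("/opt/storm/x.jar")  # -> ["/opt/storm/x.jar"]
#   get_wildcard_dir("/does/not/exist")   # -> []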
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
    else:
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
    if STORM_EXT_CLASSPATH is not None:
        ret.append(STORM_EXT_CLASSPATH)
    if daemon and STORM_EXT_CLASSPATH_DAEMON is not None:
        ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("dependency handler returned a non-zero exit code: code<%s> stderr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
    # For debugging purposes, uncomment when you need to inspect the resolver output
    # print("Resolved dependencies: %s" % output)
    try:
        return json.loads(output)
    except ValueError:
        raise RuntimeError("dependency handler returned a non-JSON response: stdout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
    storm_log_dir = confvalue("storm.log.dir", [CLUSTER_CONF_DIR])
    if storm_log_dir is None or storm_log_dir == "null":
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
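# Illustrative shape of the java command line assembled above, for a client
# command with no -c overrides (/opt/storm is an assumption; <...> abbreviate
# the computed values):
#
#   java -client -Ddaemon.name= -Dstorm.options= -Dstorm.home=/opt/storm \
#        -Dstorm.log.dir=/opt/storm/logs -Djava.library.path=<libpath> \
#        -Dstorm.conf.file= -cp <classpath> <jvmopts> <klass> <args>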
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
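    # Resolve --jars/--artifacts dependencies, put them and the topology jar on
    # the classpath, and hand off to exec_storm_class with storm.jar /
    # storm.dependency.* system properties so StormSubmitter can upload
    # everything when the topology is submitted.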
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
    extra_jars = [jarfile, USER_CONF_DIR, STORM_BIN_DIR]
    extra_jars.extend(local_jars)
    extra_jars.extend(artifact_to_file_jars.values())
    exec_storm_class(
        klass,
        jvmtype="-client",
        extrajars=extra_jars,
        args=args,
        daemon=daemon,
        client=client,
        jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
            ["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
            ["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
    if debug_args is not None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a Maven POM:
    add exclusions after the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because a URL can contain many different characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, since the current working directory is sometimes non-deterministic.
    You can also provide proxy information so that the dependency resolver uses a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
    """Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
    Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass them, since in many cases the SQL statements need a data source which lives in external storage.
    """
    global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
    # Copy so that extending the list below does not mutate the global DEP_JARS_OPTS.
    local_jars = list(DEP_JARS_OPTS)
    artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
    # include storm-sql-runtime jar(s) in the local jar list;
    # --jars doesn't support wildcards, so get_jars_full is used instead
    sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
    local_jars.extend(sql_runtime_jars)
    extrajars = [USER_CONF_DIR, STORM_BIN_DIR]
    extrajars.extend(local_jars)
    extrajars.extend(artifact_to_file_jars.values())
    # include this for running StormSqlRunner, but not for the generated topology
    sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
    extrajars.extend(sql_core_jars)
    if topology_name == "--explain":
        args = ["--file", sql_file, "--explain"]
    else:
        args = ["--file", sql_file, "--topology", topology_name]
    exec_storm_class(
        "org.apache.storm.sql.StormSqlRunner",
        jvmtype="-client",
        extrajars=extrajars,
        args=args,
        daemon=False,
        jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
            ["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
    list [KEY...] - lists blobs currently in the blob store
    cat [-f FILE] KEY - read a blob and then either write it to a file or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated
        list (requires admin access).
    replication --read KEY - read the replication factor of the blob.
    replication --update --replication-factor NUMBER KEY where NUMBER > 0 - update the
        replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments
    to the topology, and let the scheduler try to find a better scheduling based
    on the new situation. The topology will then return to its previous state of
    activation (so a deactivated topology will still be deactivated and an
    activated topology will go back to being activated).
    Among the things you can change about a topology are: the number of requested
    workers (-n flag), the number of executors for a given component (-e flag),
    the resources each component requests, as used by the resource aware
    scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
        authenticating with ZK; defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar and uploads the jar to Nimbus, then runs the given command with its arguments via shell_submission. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
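    # Prefer the configured storm.log4j2.conf.dir; fall back to the packaged
    # log4j2 directory, and resolve relative paths against the Storm home dir.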
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
    if storm_log4j2_conf_dir is None or storm_log4j2_conf_dir == "null":
        storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
    elif not os.path.isabs(storm_log4j2_conf_dir):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
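    # Pattern shared by all daemon launchers: <daemon>.childopts from the config
    # is split into JVM flags, then a daemon-specific log file name and the
    # log4j2 cluster.xml configuration are appended.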
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are
    treated as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
    if command is not None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
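    # -c key=value overrides accumulate in the global CONFIG_OPTS list and are
    # forwarded to the JVM via the -Dstorm.options property (see get_config_opts).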
global CONFIG_OPTS
    CONFIG_OPTS.extend(config_list)
def parse_local_opts(args):
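    # Peel --local-ttl and --java-debug (and their values) off the argument
    # list; everything else is passed through to the topology's main class.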
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
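    # Detect the --storm-server-classpath flag; the remaining tokens are
    # passed through unchanged.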
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
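    # Scan the raw argv for global options (-c, --config, --jars, --artifacts,
    # repository and proxy settings), consuming each flag and its value; any
    # token not recognized is returned in args_list for the subcommand.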
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
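    # Strip the global options off argv, record dependency/proxy settings in
    # module-level globals, then dispatch the first remaining token as a
    # subcommand (falling back to unknown_command).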
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.
--jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.
Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.
You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
| 385
| 425
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd();
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
if os.path.isdir(path):
ret = [(os.path.join(path, "*"))]
elif os.path.exists(path):
ret = [path]
return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>", output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
When you want to ship other jars which is not included to application jar, you can pass them to --jars option with comma-separated string.
For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
And when you want to ship maven artifacts and its transitive dependencies, you can pass them to --artifacts with comma-separated string.
You can also exclude some dependencies like what you're doing in maven pom.
Please add exclusion artifacts with '^' separated string after the artifact.
For example, -artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load jedis and kafka-clients artifact and all of transitive dependencies but exclude slf4j-api from kafka.
When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.
Repository format is "<name>^<url>". '^' is taken as separator because URL allows various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.
You can provide local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use specific directory. It might help when you don't have '.m2/repository' directory in home directory, because CWD is sometimes non-deterministic (fragile).
You can also provide proxy information to let dependency resolver utilizing proxy if needed. There're three parameters for proxy:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
When you pass jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included to classpath of both the process which runs the class, and also workers for that topology.
If for some reason you need to have the full storm classpath, not just the one for the worker you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.
--jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.
Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.
You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
Set the com.myapp logger's level to WARN indifinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag) make requested adjustments to the topology
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
Some of what you can change about a topology includes the number of requested workers (-n flag)
The number of executors for a given component (-e flag) the resources each component is
requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name, all of the arguments are
treated as arguments to that function. If no function is given, the arguments must
be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor the given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
By default,
poll-interval is 4 seconds;
all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
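# Usage sketch (topology and component names are hypothetical):
#   storm monitor my-topology -i 10 -m word-count-bolt -s default -w emitted
# polls the word-count-bolt component every 10 seconds and watches emitted counts.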
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
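# Illustrative example (argument values are hypothetical):
#   parse_local_opts(["--local-ttl", "60", "--java-debug",
#                     "transport=dt_socket,address=8000", "org.example.Main", "arg1"])
# returns ("60", "transport=dt_socket,address=8000", ["org.example.Main", "arg1"]).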
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
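# Illustrative example (values are hypothetical): for the argv tail
#   ["-c", "nimbus.host=nimbus1", "--jars", "a.jar,b.jar", "jar", "topo.jar", "org.example.Main"]
# this returns config_list=["nimbus.host=nimbus1"], jars_list=["a.jar", "b.jar"],
# empty artifact/proxy settings, and args_list=["jar", "topo.jar", "org.example.Main"].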
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
kill
|
Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
ret = []
if os.path.isdir(path):
ret = [os.path.join(path, "*")]
elif os.path.exists(path):
ret = [path]
return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else:
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except ValueError:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
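# Note on control flow: on non-Windows, non-fork invocations, os.execvp replaces
# the current Python process with the JVM, so the `return exit_code` above is
# only ever reached when fork=True or on Windows.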
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
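# Usage sketch (jar path and class are hypothetical):
#   storm local target/my-topology.jar org.example.MyTopology --local-ttl 60
# runs the topology against an in-process local cluster for 60 seconds.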
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
When you want to ship other jars that are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
And when you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
You can also exclude some dependencies, as you would in a maven pom.
Please add exclusion artifacts as a '^'-separated string after the artifact.
For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
The repository format is "<name>^<url>". '^' is taken as the separator because URLs allow various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
Please refer to "help jar" to see how to use these options.
You will normally want to pass these options, since in many cases your SQL needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
# MASKED: kill function (lines 427-444)
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
Some of what you can change about a topology includes the number of requested workers (-n flag),
the number of executors for a given component (-e flag), the resources each component
requests as used by the resource-aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
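# Usage sketch (names and numbers are hypothetical):
#   storm rebalance my-topology -w 30 -n 8 -e split-bolt=16
# waits 30 seconds, then redistributes the topology across 8 workers and
# sets the split-bolt component's parallelism to 16.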
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use;
defaults to the storm connection string.
-t --time-out <timeout>: Set the timeout to use; defaults to the storm
zookeeper timeout.
-w --write: Allow writes; defaults to read-only so we don't
accidentally cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK; defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives the resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name, all of the arguments are
treated as arguments to that function. If no function is given, the arguments must
be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor the given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
By default,
poll-interval is 4 seconds;
all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
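# Usage sketch (topology name is hypothetical):
#   storm kill my-topology -w 30
# deactivates the spouts, waits 30 seconds, then shuts the workers down.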
| 427
| 444
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
ret = []
if os.path.isdir(path):
ret = [os.path.join(path, "*")]
elif os.path.exists(path):
ret = [path]
return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else:
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except ValueError:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
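# Build the complete JVM command line (daemon name, config opts, storm.home,
# log dir, java.library.path, conf file, classpath) and run it: exec in-place
# on POSIX, spawn-and-wait when fork=True, or a subprocess on Windows so
# whitespace in JAVA_CMD is handled.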
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
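# Run a user jar with the client classpath: resolve --jars/--artifacts
# dependencies first, then expose them to the JVM via the storm.jar,
# storm.dependency.jars and storm.dependency.artifacts system properties.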
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship Maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, just as you would in a Maven POM.
    Add exclusion artifacts after the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories to the dependency resolver.
    You can provide a local Maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This helps when you don't have a '.m2/repository' directory in your home directory, since the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so that the dependency resolver can use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need to have the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases your SQL needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
    local_jars = list(DEP_JARS_OPTS)  # copy, so extending below doesn't mutate the global list
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN indefinitely (no timeout given)
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
    to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested
    adjustments to the topology, and let the scheduler try to find a better
    scheduling based on the new situation. The topology will then return to
    its previous state of activation (so a deactivated topology will still
    be deactivated and an activated topology will go back to being activated).
    Some of what you can change about a topology includes the number of
    requested workers (-n flag), the number of executors for a given
    component (-e flag), the resources each component requests as used by
    the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives the resources into a jar, uploads the jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
    e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to that function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
        extrajars=cppaths)
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
        extrajars=cppaths)
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
    global CONFIG_OPTS
    CONFIG_OPTS.extend(config_list)
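# Pull --local-ttl and --java-debug (each taking one value) out of the
# argument list for "storm local"; all other tokens pass through unchanged.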
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
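# Extract the boolean --storm-server-classpath flag from the argument list
# for "storm jar"; all other tokens pass through unchanged.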
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
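# Scan argv for global options (-c, --config, --jars, --artifacts,
# --artifactRepositories and the maven/proxy settings) and return them along
# with the untouched positional arguments. Illustrative invocation:
#   storm -c nimbus.host=nimbus.mycompany.com --jars a.jar,b.jar jar topo.jar com.example.Main
# yields config_list=['nimbus.host=nimbus.mycompany.com'],
# jars_list=['a.jar', 'b.jar'], args_list=['jar', 'topo.jar', 'com.example.Main'].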
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
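# Entry point: split the global options from the command, stash the dependency
# options in module globals, then dispatch to the matching COMMANDS handler.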
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
rebalance
|
Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag), make the requested
adjustments to the topology, and let the scheduler try to find a better
scheduling based on the new situation. The topology will then return to
its previous state of activation (so a deactivated topology will still
be deactivated and an activated topology will go back to being activated).
Some of what you can change about a topology includes the number of
requested workers (-n flag), the number of executors for a given
component (-e flag), the resources each component requests as used by
the resource aware scheduler (-r flag), and configs (-t flag).
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3 returns bytes from communicate(); decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
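# Load optional environment overrides from conf/storm_env.ini (the
# [environment] section) into os.environ before any JVM is launched.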
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
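# On Cygwin, classpath entries must be translated to Windows form via cygpath;
# everywhere else they are used as-is.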
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
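# List the individual .jar files directly under adir (no "*" wildcard), for
# callers such as the sql command whose --jars option needs explicit paths.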
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to [] so a non-existent path doesn't raise UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
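# Assemble the classpath: storm home wildcard, the worker or full lib dir,
# extlib, then extlib-daemon and STORM_EXT_CLASSPATH* for daemons, plus any
# extra jars the caller supplies.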
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
    else:
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
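# Look up a single config value by running org.apache.storm.command.ConfigValue
# in a JVM and parsing the "VALUE: ..." line it prints to stdout.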
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        return json.loads(output)
    except ValueError:
        # narrow the bare except and actually interpolate the output into the message
        raise RuntimeError("dependency handler returned non-JSON response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
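# Build the complete JVM command line (daemon name, config opts, storm.home,
# log dir, java.library.path, conf file, classpath) and run it: exec in-place
# on POSIX, spawn-and-wait when fork=True, or a subprocess on Windows so
# whitespace in JAVA_CMD is handled.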
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
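# Run a user jar with the client classpath: resolve --jars/--artifacts
# dependencies first, then expose them to the JVM via the storm.jar,
# storm.dependency.jars and storm.dependency.artifacts system properties.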
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship Maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, just as you would in a Maven POM.
    Add exclusion artifacts after the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories to the dependency resolver.
    You can provide a local Maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This helps when you don't have a '.m2/repository' directory in your home directory, since the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so that the dependency resolver can use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need to have the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases your SQL needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
    local_jars = list(DEP_JARS_OPTS)  # copy, so extending below doesn't mutate the global list
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN indefinitely (no timeout given)
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
    to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
# MASKED: rebalance function (lines 571-600)
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives the resources into a jar, uploads the jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
    e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
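# A minimal sketch of parse_local_opts behaviour (hypothetical argument
# values): known flags are consumed together with their value, and everything
# else passes through in order, so
#   parse_local_opts(["--local-ttl", "60", "--java-debug",
#                     "transport=dt_socket,address=localhost:8000", "arg1"])
# returns ("60", "transport=dt_socket,address=localhost:8000", ["arg1"]).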
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
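# Sketch of the option consumption above (hypothetical command line): each
# recognized flag pops its value from the token stream and every other token
# falls through to args_list, so
#   parse_config_opts(["-c", "nimbus.host=n1", "--jars", "a.jar,b.jar",
#                      "jar", "topo.jar", "MyTopo"])
# yields config_list == ["nimbus.host=n1"], jars_list == ["a.jar", "b.jar"],
# and args_list == ["jar", "topo.jar", "MyTopo"] (the remaining fields stay
# empty or None).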
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments
    to the topology, and let the scheduler try to find a better scheduling based on
    the new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
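# Example invocation of the rebalance syntax documented above (the topology
# name, component names, and values are hypothetical):
#   storm rebalance my-topo -w 30 -n 10 -e spout=4 -e bolt=8 \
#       -r '{"bolt": {"resource1": 2.0}}' -t '{"conf1": "newValue"}'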
| 571
| 600
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # communicate() returns bytes on python 3; decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a nonexistent path cannot leave ret unbound
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
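# Example (hypothetical path): for an existing directory such as
# /opt/storm/lib this returns ["/opt/storm/lib/*"], letting the JVM expand
# the wildcard to every jar in the directory; a plain file is returned as-is.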
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # %-format the message; passing output as a second argument would not interpolate it
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
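# For orientation, the command assembled above has roughly this shape
# (paths abbreviated, values hypothetical):
#   java -server -Ddaemon.name=nimbus -Dstorm.options= -Dstorm.home=/opt/storm \
#        -Dstorm.log.dir=/opt/storm/logs -Djava.library.path=... \
#        -Dstorm.conf.file= -cp <classpath> <jvmopts...> <klass> <args...>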
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
    --java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK. For example,
    --java-debug transport=dt_socket,address=localhost:8000
    will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Please add exclusion artifacts with a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    Repository format is "<name>^<url>". '^' is taken as the separator because a URL allows various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. It might help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three parameters for the proxy:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
    A complete example of the options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need to have the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
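# Hedged sketch (helper not part of the original script): how an --artifacts
# entry in the format documented above decomposes into an artifact plus its
# exclusions. For example,
#   _example_split_artifact_spec("org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api")
# returns ("org.apache.kafka:kafka-clients:1.0.0", ["org.slf4j:slf4j-api"]).
def _example_split_artifact_spec(spec):
    # '^' separates the artifact coordinate from any excluded artifacts
    parts = spec.split('^')
    return parts[0], parts[1:]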
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases the data source for your SQL is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
        to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments
    to the topology, and let the scheduler try to find a better scheduling based on
    the new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
    to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
    -j --jaas <jaas_file>: Include a jaas file that should be used when
       authenticating with ZK. Defaults to the
       java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads the jar to Nimbus, and executes the following arguments on "local". Useful for non-JVM languages.
    e.g. `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
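# Example invocation of the monitor syntax documented above (the topology and
# component names are hypothetical):
#   storm monitor my-topo -i 10 -m word-count-bolt -s default -w emitted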
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
shell
|
Syntax: [storm shell resourcesdir command args]
Archives resources into a jar, uploads the jar to Nimbus, and executes the following arguments on "local". Useful for non-JVM languages.
e.g. `storm shell resources/ python topology.py arg1 arg2`
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # communicate() returns bytes on python 3; decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a nonexistent path cannot leave ret unbound
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
    # capture stderr as well, so the failure message below actually contains it
    p = sub.Popen(command, stdout=sub.PIPE, stderr=sub.PIPE)
    output, errors = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("dependency handler returned non-zero exit code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returned a non-JSON response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
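# Launch a Storm Java class. Depending on the flags this either replaces the
# current process (os.execvp), forks and waits for the child (fork=True), or,
# on Windows, runs the JVM via subprocess to cope with whitespace in JAVA_CMD.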
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
    if storm_log_dir is None or storm_log_dir == "null":
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
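# Run a user topology jar: resolve any --artifacts dependencies, put the jar,
# the user config and the resolved jars on the classpath, and tell
# StormSubmitter where the jar lives via -Dstorm.jar.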
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
    if debug_args is not None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars that are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship Maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, just as you would in a Maven POM:
    append the exclusion artifacts to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs can contain a wide range of characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories to the dependency resolver.
    You can provide a local Maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This helps when you don't have an '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so the dependency resolver uses a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Be careful: this adds things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    In explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass them, since in many cases your SQL reads from or writes to an external data source.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
    # include the storm-sql-runtime jar(s) in the local jar list
    # --jars doesn't support wildcards, so call get_jars_full instead
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
        comma-separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
    ./bin/storm set_log_level -l com.myapp=WARN topology-name
        Set the com.myapp logger's level to WARN indefinitely (no timeout given)
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
        to ERROR for 123 seconds
    ./bin/storm set_log_level -r com.myOtherLogger topology-name
        Clear the setting, resetting the logger back to its original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource-aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
    Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
            -w --write: Allow writes; defaults to read-only to avoid causing
            accidental changes.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
    """Syntax: [storm shell resourcesdir command args]
    Archives the resources directory into a jar, uploads that jar to Nimbus, and
    executes the remaining arguments locally. Useful for non-JVM languages.
    eg: `storm shell resources/ python topology.py arg1 arg2`
    """
    tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
    os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
    runnerargs = [tmpjarpath, command]
    runnerargs.extend(args)
    exec_storm_class(
        "org.apache.storm.command.shell_submission",
        args=runnerargs,
        jvmtype="-client",
        extrajars=[USER_CONF_DIR],
        fork=True)
    os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
    if storm_log4j2_conf_dir is None or storm_log4j2_conf_dir == "null":
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
    elif not os.path.isabs(storm_log4j2_conf_dir):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to that function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    You can specify the poll interval, component-id, stream-id, and watch-item (emitted | transferred).
    By default,
        the poll interval is 4 seconds;
        all component-ids will be listed;
        stream-id is 'default';
        watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
    if command is not None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
    global CONFIG_OPTS
    CONFIG_OPTS.extend(config_list)
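# Split the arguments of "storm local" into the --local-ttl value, the optional
# --java-debug agent string, and the arguments that are passed through.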
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
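# Strip the global options (-c, --config, --jars, --artifacts, the proxy
# settings, etc.) out of the raw argument list, returning them alongside the
# remaining command arguments.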
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
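# Entry point: split off the global options, store the dependency/proxy settings
# in module globals, then dispatch to the requested sub-command. For example
# (illustrative jar/class names):
#   ./bin/storm jar mytopology.jar com.example.MyTopology -c nimbus.seeds='["nimbus1"]'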
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
| 676
| 692
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd();
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
if os.path.isdir(path):
ret = [(os.path.join(path, "*"))]
elif os.path.exists(path):
ret = [path]
return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>", output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
When you want to ship other jars which is not included to application jar, you can pass them to --jars option with comma-separated string.
For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
And when you want to ship maven artifacts and its transitive dependencies, you can pass them to --artifacts with comma-separated string.
You can also exclude some dependencies like what you're doing in maven pom.
Please add exclusion artifacts with '^' separated string after the artifact.
For example, -artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load jedis and kafka-clients artifact and all of transitive dependencies but exclude slf4j-api from kafka.
When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.
Repository format is "<name>^<url>". '^' is taken as separator because URL allows various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.
You can provide local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use specific directory. It might help when you don't have '.m2/repository' directory in home directory, because CWD is sometimes non-deterministic (fragile).
You can also provide proxy information to let dependency resolver utilizing proxy if needed. There're three parameters for proxy:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
When you pass jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included to classpath of both the process which runs the class, and also workers for that topology.
If for some reason you need to have the full storm classpath, not just the one for the worker you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.
--jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.
Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.
You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
Set the com.myapp logger's level to WARN indifinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag) make requested adjustments to the topology
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
Some of what you can change about a topology includes the number of requested workers (-n flag)
The number of executors for a given component (-e flag) the resources each component is
requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
    Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
    -j --jaas <jaas_file>: Include a jaas file that should be used when
    authenticating with ZK. Defaults to the
    java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
    eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
    stream-id is 'default';
    watch-item is 'emitted'.
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
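# A minimal sketch of how parse_local_opts consumes the local-mode flags
# (the argument values below are hypothetical):
#
#   parse_local_opts(["--local-ttl", "60",
#                     "--java-debug", "transport=dt_socket,address=localhost:8000",
#                     "org.example.MyTopology"])
#   # -> ("60", "transport=dt_socket,address=localhost:8000", ["org.example.MyTopology"])
#
# Unrecognized tokens are passed through unchanged in args_list.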
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
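# Sketch: parse_jar_opts only strips the --storm-server-classpath flag and
# forwards everything else (example arguments are hypothetical):
#
#   parse_jar_opts(["--storm-server-classpath", "org.example.MyTopology", "arg1"])
#   # -> (True, ["org.example.MyTopology", "arg1"])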
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
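# Sketch of the tuple parse_config_opts returns for a hypothetical invocation:
#
#   parse_config_opts(["-c", "nimbus.host=nimbus.example.com",
#                      "--jars", "a.jar,b.jar",
#                      "jar", "topo.jar", "org.example.MyTopology"])
#   # config_list -> ["nimbus.host=nimbus.example.com"]
#   # jars_list   -> ["a.jar", "b.jar"]
#   # args_list   -> ["jar", "topo.jar", "org.example.MyTopology"]
#   # the remaining fields keep their defaults (empty lists / None)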
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
nimbus
|
Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3: Popen returns bytes, decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
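# Sketch: get_jars_full expands a directory to the .jar files directly inside
# it (the path and jar name below are hypothetical):
#
#   get_jars_full("/opt/storm/lib-tools/sql/runtime")
#   # -> ["/opt/storm/lib-tools/sql/runtime/storm-sql-runtime-x.y.z.jar", ...]
#
# Unlike get_wildcard_dir below, this returns concrete file paths, which is
# needed where wildcards are not understood (e.g. the --jars option).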
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    ret = []  # default for paths that do not exist (avoids UnboundLocalError)
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
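# Sketch (hypothetical path): get_wildcard_dir("/opt/storm/lib") returns
# ["/opt/storm/lib/*"] when the directory exists, so the JVM expands the
# wildcard itself instead of the script enumerating every jar.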
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
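# Sketch of the ordering get_classpath produces, assuming a hypothetical
# STORM_DIR of /opt/storm and daemon=True:
#
#   /opt/storm/*                 (release root)
#   /opt/storm/lib/*             (or lib-worker/* when client=True)
#   /opt/storm/extlib/*
#   /opt/storm/extlib-daemon/*   (daemon only)
#   $STORM_EXT_CLASSPATH (if set) and $STORM_EXT_CLASSPATH_DAEMON (daemon only, if set)
#   ...then extrajars, joined with os.pathsep and normalized for cygwin.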
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # use % formatting; passing output as a second RuntimeError argument
        # would leave the placeholder unfilled
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
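# Sketch of the command line exec_storm_class assembles (paths hypothetical):
#
#   java -client -Ddaemon.name= -Dstorm.options= -Dstorm.home=/opt/storm \
#        -Dstorm.log.dir=/opt/storm/logs -Djava.library.path=... \
#        -Dstorm.conf.file= -cp <classpath> <jvmopts> <klass> <args>
#
# fork=True waits on a child process, Windows uses check_output, and the
# default path replaces this python process entirely via os.execvp.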
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies like you do in a maven pom.
    Please add exclusion artifacts as a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from repositories other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    Repository format is "<name>^<url>". '^' is used as the separator because URLs allow various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. It might help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three parameters for the proxy:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username of the proxy if it requires basic auth
    --proxyPassword: password of the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need to have the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases your SQL needs a data source that is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
    or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
    a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
    separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
    get PATH - Get the heartbeat data at PATH.
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
    Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
    -j --jaas <jaas_file>: Include a jaas file that should be used when
    authenticating with ZK. Defaults to the
    java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
    eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
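# Sketch: resolution order for the log4j2 config directory is
#   1. the storm.log4j2.conf.dir config value (absolute, or relative to STORM_DIR),
#   2. otherwise the bundled $STORM_DIR/log4j2 directory.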
# MASKED: nimbus function (lines 712-732)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
    stream-id is 'default';
    watch-item is 'emitted'.
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
| 712
| 732
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3: Popen returns bytes, decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path contributes nothing to the
    # classpath instead of raising UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
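# Example (hypothetical path): get_wildcard_dir("/opt/storm/lib") returns
# ["/opt/storm/lib/*"], which the JVM expands to every jar in that directory,
# while get_jars_full("/opt/storm/lib") would list each .jar file explicitly --
# needed where wildcards are not understood (e.g. the --jars option).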
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
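# Illustrative result (artifact and path are hypothetical): for
# artifacts=["redis.clients:jedis:2.9.0"], the resolver prints a JSON map from
# artifact coordinates to resolved local jar paths, e.g.
#   {"redis.clients:jedis:2.9.0": "/home/user/.m2/repository/redis/clients/jedis/2.9.0/jedis-2.9.0.jar"}
# which json.loads turns into the dict whose values are appended to the classpath.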
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
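# Sketch of a resulting command line (paths are assumptions):
#   java -client -Ddaemon.name= -Dstorm.options= -Dstorm.home=/opt/storm \
#        -Dstorm.log.dir=/opt/storm/logs -Djava.library.path=/usr/lib \
#        -Dstorm.conf.file= -cp '/opt/storm/*:/opt/storm/lib/*' \
#        org.apache.storm.command.ListTopologies
# Note that on POSIX the non-fork path replaces this process via os.execvp, so
# the trailing "return exit_code" is only reached on Windows or when fork=True.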
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
    exec_storm_class(
        klass,
        jvmtype="-client",
        extrajars=extra_jars,
        args=args,
        daemon=False,
        client=client,  # pass through so --storm-server-classpath actually switches classpaths
        jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
                ["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
                ["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Append exclusion artifacts to the artifact with a '^'-separated string.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    Repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when there is no '.m2/repository' directory in the home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases the data source for your SQL is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
        Set the com.myapp logger's level to WARN indefinitely (no timeout given)
        ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource-aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources to a jar, uploads the jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
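# Example: with storm.log4j2.conf.dir unset (or "null"), daemons load
# $STORM_DIR/log4j2/cluster.xml; a relative setting such as "my-log4j2"
# resolves to os.path.join(STORM_DIR, "my-log4j2"), while absolute paths
# are kept as-is.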
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
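# Example (hypothetical argv tail):
#   parse_local_opts(("--local-ttl", "60", "--java-debug",
#                     "transport=dt_socket,address=localhost:8000", "arg1"))
# returns ("60", "transport=dt_socket,address=localhost:8000", ["arg1"]).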
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
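# Example: parse_jar_opts(("--storm-server-classpath", "remote")) returns
# (True, ["remote"]), which makes jar() run with the full server classpath
# (client=False) instead of the worker classpath.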
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
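# Example (hypothetical command line): for
#   storm jar topo.jar com.acme.Main -c nimbus.host=n1 --jars a.jar,b.jar
# this returns config_list=["nimbus.host=n1"], jars_list=["a.jar", "b.jar"]
# and args_list=["jar", "topo.jar", "com.acme.Main"]; the remaining dependency
# and proxy fields stay at their defaults.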
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
pacemaker
|
Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3 returns bytes from the pipe; decode before splitting into lines
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path contributes nothing to the
    # classpath instead of raising UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
    exec_storm_class(
        klass,
        jvmtype="-client",
        extrajars=extra_jars,
        args=args,
        daemon=False,
        client=client,  # pass through so --storm-server-classpath actually switches classpaths
        jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
                ["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
                ["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Append exclusion artifacts to the artifact with a '^'-separated string.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    Repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when there is no '.m2/repository' directory in the home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases the data source for your SQL is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
        Set the com.myapp logger's level to WARN indefinitely (no timeout given)
        ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based on the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
What you can change about a topology includes the number of requested workers (-n flag),
the number of executors for a given component (-e flag), the resources each component
requests as used by the resource-aware scheduler (-r flag), and the configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
key/value pairs of component-name and component-error for the components in error.
The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, the user needs
admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes; defaults to read-only, as we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK; defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives resources into a jar, uploads that jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
# MASKED: pacemaker function (lines 734-754)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name, all of the arguments are treated
as arguments to the function. If no function is given, the arguments must
be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor the given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
By default,
poll-interval is 4 seconds;
all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
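# Illustrative walk-through (using the example from the help text): for
# "storm list -c nimbus.host=nimbus.mycompany.com", parse_config_opts returns
# config_list=["nimbus.host=nimbus.mycompany.com"] and args=["list"], and
# main() then dispatches to COMMANDS["list"].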
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
| 734
| 754
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3 returns bytes, matching the handling in confvalue below
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
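# On Cygwin, Java expects Windows-style classpaths, so paths are converted with
# cygpath; on every other platform they pass through unchanged.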
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
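# Return the paths of all .jar files in a directory (or the path itself if it
# names a single jar file).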
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
ret = []
if os.path.isdir(path):
ret = [(os.path.join(path, "*"))]
elif os.path.exists(path):
ret = [path]
return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
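# Look up a single config value by running org.apache.storm.command.ConfigValue
# in a child JVM and scanning its stdout for the "VALUE:" line.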
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except ValueError:
# json.loads raises ValueError (JSONDecodeError) on malformed output
raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
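# Assemble the full java command line (JVM type, storm system properties,
# classpath, the target class and its args) and run it: spawn-and-wait when
# fork=True, capture output on Windows, otherwise replace this process via os.execvp.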
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
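# Run a user jar's main class on the client classpath, adding any --jars /
# --artifacts dependencies and advertising them to the topology via the
# -Dstorm.dependency.jars and -Dstorm.dependency.artifacts properties.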
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
When you want to ship other jars that are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
When you want to ship Maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
You can also exclude some dependencies, as you would in a Maven POM.
Append exclusion artifacts to the artifact as a '^'-separated string.
For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
The repository format is "<name>^<url>". '^' is used as the separator because URLs allow various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories to the dependency resolver.
You can provide a local Maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
You can also provide proxy information so the dependency resolver uses a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of the proxy ('http://host:port')
--proxyUsername: username for the proxy if it requires basic auth
--proxyPassword: password for the proxy if it requires basic auth
A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
If for some reason you need the full storm classpath, not just the worker one, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
Please refer to "help jar" to see how to use these options.
You will normally want to pass these options, since your SQL usually needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated
list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. Used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based on the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
What you can change about a topology includes the number of requested workers (-n flag),
the number of executors for a given component (-e flag), the resources each component
requests as used by the resource-aware scheduler (-r flag), and the configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
key/value pairs of component-name and component-error for the components in error.
The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, the user needs
admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes; defaults to read-only, as we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK; defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives resources into a jar, uploads that jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name, all of the arguments are treated
as arguments to the function. If no function is given, the arguments must
be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor the given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
By default,
poll-interval is 4 seconds;
all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
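# Illustrative walk-through (using the example from the help text): for
# "storm list -c nimbus.host=nimbus.mycompany.com", parse_config_opts returns
# config_list=["nimbus.host=nimbus.mycompany.com"] and args=["list"], and
# main() then dispatches to COMMANDS["list"].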
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
supervisor
|
Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3 returns bytes, matching the handling in confvalue
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
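# Illustrative sketch (hypothetical paths): for a directory /opt/storm/lib
# containing a.jar and b.jar, get_jars_full("/opt/storm/lib") returns the
# explicit list ["/opt/storm/lib/a.jar", "/opt/storm/lib/b.jar"], whereas
# get_wildcard_dir("/opt/storm/lib") returns the single wildcard entry
# ["/opt/storm/lib/*"] and leaves JAR expansion to the JVM.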
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
    else:
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
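# Illustrative sketch: get_classpath([], daemon=True) joins, in order,
# STORM_DIR/*, STORM_LIB_DIR/*, extlib/*, and extlib-daemon/*, plus any
# STORM_EXT_CLASSPATH / STORM_EXT_CLASSPATH_DAEMON entries, using os.pathsep
# (":" on POSIX, ";" on Windows); passing client=True swaps STORM_LIB_DIR
# for STORM_WORKER_LIB_DIR, and daemon=False skips the daemon-only entries.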
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
        if proxy_username is not None:
            command.extend(["--proxyUsername", proxy_username])
            if proxy_password is not None:
                command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
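# Illustrative sketch: a client-mode call such as
#   exec_storm_class("org.apache.storm.command.ListTopologies",
#                    jvmtype="-client", extrajars=[USER_CONF_DIR])
# execs roughly
#   java -client -Ddaemon.name= <config opts> -Dstorm.home=... \
#        -Dstorm.log.dir=... -Djava.library.path=... -Dstorm.conf.file= \
#        -cp <classpath> org.apache.storm.command.ListTopologies
# Note that on non-Windows platforms with fork=False, os.execvp replaces the
# current Python process, so nothing after the call runs.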
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, just as you would in a maven pom.
    Please append exclusion artifacts to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    Repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases your sql needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
            Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar and uploads the jar to Nimbus, then executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
# MASKED: supervisor function (lines 756-776)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
        all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
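# Illustrative sketch:
#   parse_local_opts(["--local-ttl", "30", "--java-debug",
#                     "transport=dt_socket,address=localhost:8000", "arg1"])
# returns ("30", "transport=dt_socket,address=localhost:8000", ["arg1"]);
# with neither flag present it returns the default ttl "20", debug_args of
# None, and the arguments unchanged.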
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
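# Illustrative sketch (assuming supervisor.childopts is "-Xmx256m" and the
# defaults above): the jvmopts passed to exec_storm_class become
#   ["-Xmx256m", "-Djava.deserialization.disabled=true",
#    "-Dlogfile.name=supervisor.log",
#    "-Dlog4j.configurationFile=<log4j2-conf-dir>/cluster.xml"]
# so per-daemon JVM tuning comes from the storm config, while the logging
# properties are fixed by the launcher.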
| 756
| 776
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
    else:
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
        if proxy_username is not None:
            command.extend(["--proxyUsername", proxy_username])
            if proxy_password is not None:
                command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, just as you would in a maven pom.
    Please append exclusion artifacts to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    Repository format is "<name>^<url>". '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases your sql needs a data source that lives in external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based off of the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
    to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
ui
|
Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3: communicate() returns bytes, so decode before splitting
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
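# Illustrative note (not part of the original script): unlike get_wildcard_dir below,
# get_jars_full expands a directory into explicit .jar paths, which is needed for
# options such as --jars that do not understand the JVM's "dir/*" wildcard.
# For example, assuming a hypothetical directory containing a.jar and b.txt:
#   get_jars_full("/opt/storm/lib-tools/sql/runtime")
#   -> ["/opt/storm/lib-tools/sql/runtime/a.jar"]   (b.txt is filtered out)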
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path does not raise UnboundLocalError
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
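# Illustrative examples (hypothetical paths, not produced by the original script):
#   get_wildcard_dir("/opt/storm/lib")    -> ["/opt/storm/lib/*"]  (dir -> JVM wildcard)
#   get_wildcard_dir("/opt/storm/x.jar")  -> ["/opt/storm/x.jar"]  (plain file kept as-is)
#   get_wildcard_dir("/missing/path")     -> []                    (nothing to add)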
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
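# Shape of the resolver output this function returns (the coordinate and path below
# are illustrative assumptions, not output of the original script):
#   {
#       "redis.clients:jedis:2.9.0": "/home/user/.m2/repository/redis/clients/jedis/2.9.0/jedis-2.9.0.jar"
#   }
# i.e. a dict mapping each resolved artifact coordinate to a local jar path, which
# callers serialize into -Dstorm.dependency.artifacts via json.dumps.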
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
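# A minimal sketch of how the helpers below drive exec_storm_class
# (the class name is real, the call itself is illustrative):
#   exec_storm_class("org.apache.storm.utils.VersionInfo",
#                    jvmtype="-client",
#                    extrajars=[CLUSTER_CONF_DIR])
# builds "java -client ... -cp <classpath> org.apache.storm.utils.VersionInfo" and,
# on non-Windows platforms without fork=True, replaces this process via os.execvp.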
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
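# Illustrative invocation (jar, class, and TTL are made up):
#   ./bin/storm local topologies.jar com.example.MyTopology --local-ttl 60
# runs MyTopology against an in-process LocalCluster for 60 seconds.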
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Please add exclusion artifacts as a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.
Repository format is "<name>^<url>". '^' is taken as separator because URL allows various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. It might help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use the --jars, --artifacts, --artifactRepositories, --proxyUrl, --proxyUsername, and --proxyPassword options.
    You normally want to pass these options, since in many cases the data source for your sql is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated
        list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based off of the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
    to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
# MASKED: ui function (lines 778-802)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
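# Illustrative example (argument values are made up):
#   parse_local_opts(["--local-ttl", "60", "--java-debug",
#                     "transport=dt_socket,address=localhost:8000", "arg1"])
#   -> ("60", "transport=dt_socket,address=localhost:8000", ["arg1"])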
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
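# Illustrative example (values are made up):
#   parse_config_opts(["-c", "nimbus.host=nimbus1", "--jars", "a.jar,b.jar",
#                      "jar", "t.jar", "MyClass"])
#   -> (["nimbus.host=nimbus1"], ["a.jar", "b.jar"], [], [], None, None, None, None,
#       ["jar", "t.jar", "MyClass"])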
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
| 778
| 802
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # Popen yields bytes on python 3; decode before splitting into lines.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
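# Illustrative sketch (not part of the original script): the storm_env.ini
# layout that init_storm_env() above expects. Every key under [environment]
# becomes a process environment variable (paths below are hypothetical):
#
#   [environment]
#   JAVA_HOME=/usr/lib/jvm/java-8-openjdk
#   STORM_EXT_CLASSPATH=/opt/storm-extras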
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR is None:
    CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
    CLUSTER_CONF_DIR = STORM_CONF_DIR
if not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml")):
    USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # Default to an empty list so a missing path does not leave ret unbound.
    ret = []
    if os.path.isdir(path):
        # The trailing "*" lets the JVM expand every jar in the directory.
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
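# Illustrative sketch (not part of the original script): what the helper
# above returns for a directory versus a plain file (paths hypothetical):
#   get_wildcard_dir("/opt/storm/lib")       -> ["/opt/storm/lib/*"]
#   get_wildcard_dir("/opt/storm/extra.jar") -> ["/opt/storm/extra.jar"]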
def get_classpath(extrajars, daemon=True, client=False):
    ret = get_wildcard_dir(STORM_DIR)
    if client:
        ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
    else:
        ret.extend(get_wildcard_dir(STORM_LIB_DIR))
    ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
    if daemon:
        ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
    if STORM_EXT_CLASSPATH is not None:
        ret.append(STORM_EXT_CLASSPATH)
    if daemon and STORM_EXT_CLASSPATH_DAEMON is not None:
        ret.append(STORM_EXT_CLASSPATH_DAEMON)
    ret.extend(extrajars)
    return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
    # Capture stderr as well so the error message below can actually include it.
    p = sub.Popen(command, stdout=sub.PIPE, stderr=sub.PIPE)
    output, errors = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("dependency handler returned non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
    # python 3
    if not isinstance(output, str):
        output = output.decode('utf-8')
    # For debugging, uncomment when you need to inspect the DependencyResolver output
    # print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
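# Illustrative sketch (not part of the original script): a typical childopts
# string survives parse_args() above with shell-style quoting resolved:
#   parse_args('-Xmx1024m "-Dfoo=a b"') -> ['-Xmx1024m', '-Dfoo=a b']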
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
    if storm_log_dir is None or storm_log_dir == "null":
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
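# Illustrative sketch (not part of the original script): the command line
# assembled by exec_storm_class() above, with hypothetical placeholder values:
#
#   java <jvmtype> -Ddaemon.name=<name> -Dstorm.options=<opts> \
#        -Dstorm.home=<STORM_DIR> -Dstorm.log.dir=<log dir> \
#        -Djava.library.path=<libs> -Dstorm.conf.file=<CONFFILE> \
#        -cp <classpath> <jvmopts...> <klass> <args...>
#
# With fork=True the JVM is spawned and waited on; otherwise, on non-Windows
# platforms, os.execvp replaces this Python process with the JVM.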
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
    if debug_args is not None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars that are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Append exclusions to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from repositories other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs can contain a wide range of characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when there is no '.m2/repository' directory in the home directory, since the current working directory is sometimes non-deterministic (fragile).
    You can also provide proxy information so the dependency resolver can use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath, and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use the --jars, --artifacts, --artifactRepositories, --proxyUrl, --proxyUsername, and --proxyPassword options.
    You normally want to pass these options, since in many cases the data source for your sql is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
        ./bin/storm set_log_level -l com.myapp=WARN topology-name
        Set the com.myapp logger's level to WARN indefinitely (no timeout given)
        ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
        to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
        -w --write: Allow writes; defaults to read only so we don't
            accidentally cause problems.
        -n --no-root: Don't include the storm root on the default connection string.
        -j --jaas <jaas_file>: Include a jaas file that should be used when
            authenticating with ZK; defaults to the
            java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
    storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
    if storm_log4j2_conf_dir is None or storm_log4j2_conf_dir == "null":
        storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
    elif not os.path.isabs(storm_log4j2_conf_dir):
        storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of the form [function argument].
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
        extrajars=cppaths)
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
        extrajars=cppaths)
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
        poll-interval is 4 seconds;
        all component-ids will be listed;
        stream-id is 'default';
        watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
    if command is not None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
    global CONFIG_OPTS
    CONFIG_OPTS.extend(config_list)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
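# Illustrative sketch (not part of the original script): parse_jar_opts
# above only extracts the --storm-server-classpath flag; every other token
# is handed to the topology main class unchanged.
def _demo_parse_jar_opts():
    server_cp, rest = parse_jar_opts(
        ["--storm-server-classpath", "arg1", "arg2"])
    assert server_cp is True
    assert rest == ["arg1", "arg2"]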
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
logviewer
|
Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # Popen yields bytes on python 3; decode before splitting into lines.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR is None:
    CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
    CLUSTER_CONF_DIR = STORM_CONF_DIR
if not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml")):
    USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
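# Illustrative sketch (not part of the original script): unlike
# get_wildcard_dir() below, get_jars_full() above enumerates concrete jar
# paths, which is why sql() uses it for --jars (wildcards are not supported
# there). Hypothetical example, for a directory holding a.jar and b.txt:
#   get_jars_full("/opt/libs") -> ["/opt/libs/a.jar"]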
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # Default to an empty list so a missing path does not leave ret unbound.
    ret = []
    if os.path.isdir(path):
        # The trailing "*" lets the JVM expand every jar in the directory.
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
    ret = get_wildcard_dir(STORM_DIR)
    if client:
        ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
    else:
        ret.extend(get_wildcard_dir(STORM_LIB_DIR))
    ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
    if daemon:
        ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
    if STORM_EXT_CLASSPATH is not None:
        ret.append(STORM_EXT_CLASSPATH)
    if daemon and STORM_EXT_CLASSPATH_DAEMON is not None:
        ret.append(STORM_EXT_CLASSPATH_DAEMON)
    ret.extend(extrajars)
    return normclasspath(os.pathsep.join(ret))
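# Illustrative sketch (not part of the original script): classpath assembly
# order in get_classpath() above for a client-side call (client=True,
# daemon=False):
#   <storm home>/*, lib-worker/*, extlib/*, $STORM_EXT_CLASSPATH, extrajars
# Daemons use lib/* instead of lib-worker/* and additionally pick up
# extlib-daemon/* plus $STORM_EXT_CLASSPATH_DAEMON.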
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
    # Capture stderr as well so the error message below can actually include it.
    p = sub.Popen(command, stdout=sub.PIPE, stderr=sub.PIPE)
    output, errors = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("dependency handler returned non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
    # python 3
    if not isinstance(output, str):
        output = output.decode('utf-8')
    # For debugging, uncomment when you need to inspect the DependencyResolver output
    # print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
    if storm_log_dir is None or storm_log_dir == "null":
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
    if debug_args is not None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars that are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom.
    Append exclusions to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from repositories other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs can contain a wide range of characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when there is no '.m2/repository' directory in the home directory, since the current working directory is sometimes non-deterministic (fragile).
    You can also provide proxy information so the dependency resolver can use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath, and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use the --jars, --artifacts, --artifactRepositories, --proxyUrl, --proxyUsername, and --proxyPassword options.
    You normally want to pass these options, since in many cases the data source for your sql is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then write it either to a file or to STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
comma-separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN indefinitely
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag), make the requested adjustments
to the topology, and let the scheduler try to find a better scheduling based
on the new situation. The topology will then return to its previous state of
activation (so a deactivated topology will still be deactivated and an
activated topology will go back to being activated).
Some of what you can change about a topology includes the number of requested
workers (-n flag), the number of executors for a given component (-e flag),
the resources each component requests as used by the resource-aware
scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest errors from the running topology. The returned result contains
key-value pairs of component-name and component-error for the components in error.
The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK. Defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives resources into a jar, uploads the jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
# MASKED: logviewer function (lines 804-828)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name, all of the arguments are treated
as arguments to the function. If no function is given, the arguments must
be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing; the
Zookeeper instance launched is not configured for production use.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor the given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
By default,
poll-interval is 4 seconds;
all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
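# A minimal usage sketch of parse_local_opts (added for illustration;
# "org.example.MyTopology" is a hypothetical class name):
def _demo_parse_local_opts():
    ttl, debug_args, rest = parse_local_opts([
        "--local-ttl", "60",
        "--java-debug", "transport=dt_socket,address=localhost:8000",
        "org.example.MyTopology", "arg1"])
    assert ttl == "60"
    assert debug_args == "transport=dt_socket,address=localhost:8000"
    assert rest == ["org.example.MyTopology", "arg1"]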
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
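# Usage sketch of parse_jar_opts: --storm-server-classpath is consumed as a
# flag and everything else passes through in order (arguments are illustrative).
def _demo_parse_jar_opts():
    server_cp, rest = parse_jar_opts(["--storm-server-classpath", "topo-arg"])
    assert server_cp is True
    assert rest == ["topo-arg"]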
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
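# Usage sketch of parse_config_opts: -c pairs accumulate, --jars values are
# comma-split, and unrecognized tokens come back as the residual argument
# list (the host and jar names below are hypothetical).
def _demo_parse_config_opts():
    (configs, jars, artifacts, repos, maven_dir,
     purl, puser, ppass, rest) = parse_config_opts(
        ["-c", "nimbus.seeds=[\"nimbus.example.com\"]",
         "--jars", "a.jar,b.jar", "jar", "topo.jar", "org.example.Main"])
    assert configs == ["nimbus.seeds=[\"nimbus.example.com\"]"]
    assert jars == ["a.jar", "b.jar"]
    assert rest == ["jar", "topo.jar", "org.example.Main"]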
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
| 804
| 828
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if not isinstance(output, str):
output = output.decode('utf-8')  # python 3: communicate() returns bytes
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
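# A self-contained check of get_jars_full (uses the tempfile import above):
# a directory expands to the .jar files directly inside it; non-jars are skipped.
def _demo_get_jars_full():
    scratch = tempfile.mkdtemp()
    open(os.path.join(scratch, "a.jar"), "w").close()
    open(os.path.join(scratch, "notes.txt"), "w").close()
    assert get_jars_full(scratch) == [os.path.join(scratch, "a.jar")]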
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
ret = []  # default to empty so a nonexistent path doesn't leave ret unbound
if os.path.isdir(path):
ret = [(os.path.join(path, "*"))]
elif os.path.exists(path):
ret = [path]
return ret
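# A self-contained check of get_wildcard_dir: a directory becomes a single
# JVM wildcard entry, while a plain file path passes through unchanged.
def _demo_get_wildcard_dir():
    scratch = tempfile.mkdtemp()
    assert get_wildcard_dir(scratch) == [os.path.join(scratch, "*")]
    jar_path = os.path.join(scratch, "a.jar")
    open(jar_path, "w").close()
    assert get_wildcard_dir(jar_path) == [jar_path]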
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
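# confvalue shells out to the JVM, so a self-contained demo can only exercise
# the output contract it parses: ConfigValue prints lines, and the one
# starting with "VALUE:" carries the result (the sample line is illustrative).
def _demo_confvalue_parsing():
    sample_line = "VALUE: 6627"
    tokens = sample_line.split(" ")
    assert tokens[0] == "VALUE:"
    assert " ".join(tokens[1:]) == "6627"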
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
except ValueError:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
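# Sketch of how the resolver's JSON output is consumed downstream: a mapping
# of artifact coordinate -> local jar path (the path below is a hypothetical
# example, not real resolver output).
def _demo_dependency_mapping():
    artifact_to_file_jars = {
        "redis.clients:jedis:2.9.0": "/tmp/m2/jedis/jedis-2.9.0.jar"}
    jvmopt = "-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)
    assert jvmopt.startswith("-Dstorm.dependency.artifacts={")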
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
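# For reference, exec_storm_class replaces this process with a java invocation
# of roughly this shape (a sketch only -- the real call derives the config,
# log-dir and classpath flags via confvalue/get_classpath at runtime):
#   java -client -Ddaemon.name= -Dstorm.options=... -Dstorm.home=<STORM_DIR> \
#        -Dstorm.log.dir=<log dir> -Dstorm.conf.file= -cp <classpath> \
#        <klass> <args...>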
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
When you want to ship Maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
You can also exclude some dependencies, just as you would in a Maven POM.
Add exclusion artifacts as a '^'-separated string after the artifact.
For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
When you need to pull artifacts from repositories other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
The repository format is "<name>^<url>". '^' is used as the separator because URLs allow various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories to the dependency resolver.
You can provide a local Maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of the proxy ('http://host:port')
--proxyUsername: username for the proxy if it requires basic auth
--proxyPassword: password for the proxy if it requires basic auth
A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Be careful: this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
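# Sketch of the --artifacts grammar described in the docstring above (actual
# resolution happens JVM-side in DependencyResolverMain; this only splits the
# textual format):
def _demo_artifact_format():
    spec = "org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api"
    parts = spec.split("^")
    coordinate, exclusions = parts[0], parts[1:]
    assert coordinate == "org.apache.kafka:kafka-clients:1.0.0"
    assert exclusions == ["org.slf4j:slf4j-api"]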
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for the jar command also apply to the sql command.
Please refer to "help jar" to see how to use these options.
You will normally want to pass these options, since in many cases the data source for your SQL is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then write it either to a file or to STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
comma-separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN indefinitely
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag), make the requested adjustments
to the topology, and let the scheduler try to find a better scheduling based
on the new situation. The topology will then return to its previous state of
activation (so a deactivated topology will still be deactivated and an
activated topology will go back to being activated).
Some of what you can change about a topology includes the number of requested
workers (-n flag), the number of executors for a given component (-e flag),
the resources each component requests as used by the resource-aware
scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest errors from the running topology. The returned result contains
key-value pairs of component-name and component-error for the components in error.
The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK. Defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
Archives resources into a jar, uploads the jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name, all of the arguments are treated
as arguments to the function. If no function is given, the arguments must
be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing; the
Zookeeper instance launched is not configured for production use.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor the given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
By default,
poll-interval is 4 seconds;
all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
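# Dispatch sketch for the COMMANDS table defined just below: main() resolves
# the first CLI token through the table and falls back to unknown_command for
# anything unrecognized.
def _demo_dispatch():
    assert COMMANDS.get("list", unknown_command) is listtopos
    assert COMMANDS.get("bogus", unknown_command) is unknown_command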
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
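# Illustrative sketch (hypothetical argument values): global options are
# stripped wherever they appear, comma-separated option values are split
# into lists, and everything else is left over for command dispatch.
def _example_parse_config_opts():
    (configs, jars, artifacts, repos, m2_dir,
     proxy_url, proxy_user, proxy_pass, rest) = parse_config_opts(
        ["-c", "nimbus.host=nimbus.mycompany.com",
         "--jars", "a.jar,b.jar", "list"])
    assert configs == ["nimbus.host=nimbus.mycompany.com"]
    assert jars == ["a.jar", "b.jar"]
    assert (artifacts, repos, m2_dir) == ([], [], None)
    assert rest == ["list"]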
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
drpc
|
Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
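# Illustrative sketch (file contents assumed): the optional storm_env.ini
# consumed by init_storm_env above exports every key under [environment]
# verbatim into os.environ; optionxform = str keeps keys case-sensitive.
def _example_storm_env_ini():
    sample = "[environment]\nJAVA_HOME = /usr/lib/jvm/java-8\n"
    cfg = configparser.ConfigParser()
    cfg.optionxform = str
    if hasattr(cfg, "read_string"):  # python 3 only; py2 would use readfp
        cfg.read_string(sample)
        assert cfg.get("environment", "JAVA_HOME") == "/usr/lib/jvm/java-8"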
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a nonexistent path cannot raise
    # UnboundLocalError at the return below
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
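# Illustrative sketch: an existing directory becomes a single JVM wildcard
# entry, an existing plain file is passed through, and (with the default
# above) a missing path yields an empty list.
def _example_get_wildcard_dir():
    assert get_wildcard_dir(STORM_DIR) == [os.path.join(STORM_DIR, "*")]
    assert get_wildcard_dir(os.path.join(STORM_DIR, "no-such-entry")) == []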
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
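# Illustrative sketch (assumes a normal Linux release layout with no
# STORM_EXT_CLASSPATH set): the classpath starts with the wildcarded install
# dir, adds lib-worker (client) or lib (daemon) plus extlib entries, appends
# the caller's extra jars last, and joins everything with os.pathsep.
def _example_get_classpath():
    cp = get_classpath(["/tmp/extra.jar"], daemon=False, client=True)
    entries = cp.split(os.pathsep)
    assert entries[0] == os.path.join(STORM_DIR, "*")
    assert entries[-1] == "/tmp/extra.jar"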
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # format the message eagerly; passing output as a second argument to
        # RuntimeError would leave the %s placeholder unfilled
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
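# Illustrative sketch: this is how *.childopts values from the config are
# split before being handed to the JVM; quoting keeps embedded spaces.
def _example_parse_args_childopts():
    assert parse_args('-Xmx1024m "-Dfoo=a b"') == ["-Xmx1024m", "-Dfoo=a b"]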
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, just as you would in a maven pom.
    Append exclusion artifacts to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from repositories other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs may contain many other characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so that the dependency resolver can use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the worker classpath, you may include the command line option `--storm-server-classpath`. Be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
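# Illustrative sketch of the option string formats documented in the jar
# docstring above; the real parsing happens in DependencyResolverMain on the
# JVM side, so this only demonstrates the '^'-separated syntax.
def _example_artifact_option_formats():
    artifact = "org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api"
    coords = artifact.split("^")[0]
    exclusions = artifact.split("^")[1:]
    assert coords == "org.apache.kafka:kafka-clients:1.0.0"
    assert exclusions == ["org.slf4j:slf4j-api"]
    name, url = "jboss-repository^http://repository.jboss.com/maven2".split("^", 1)
    assert name == "jboss-repository"
    assert url == "http://repository.jboss.com/maven2"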
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
    Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You will normally want to pass these options, since in many cases the data source for your SQL is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
    # include storm-sql-runtime jar(s) in the local jar list;
    # --jars doesn't support wildcards, so we call get_jars_full here
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
    or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
    a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-
    separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
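# Illustrative sketch of the ACL grammar from the blobstore docstring: each
# comma-separated entry is [uo]:[username]:[rwa subset]; the docstring's
# example grants alice full access, bob read/write, and others read.
def _example_blobstore_acl_format():
    entries = "u:alice:rwa,u:bob:rw,o::r".split(",")
    kind, who, perms = entries[0].split(":")
    assert (kind, who, perms) == ("u", "alice", "rwa")
    assert entries[2] == "o::r"  # 'o' entries have an empty username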
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN indefinitely
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
    to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
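# Illustrative sketch of the -l argument grammar documented above:
# logger=LEVEL with an optional :timeout-in-seconds suffix; the actual
# parsing is done by org.apache.storm.command.SetLogLevel on the JVM side.
def _example_set_log_level_format():
    spec = "com.myapp=WARN:30"
    logger, rest = spec.split("=", 1)
    level, _, timeout = rest.partition(":")
    assert (logger, level, timeout) == ("com.myapp", "WARN", "30")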
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology: the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
    -w --write: Allow writes; defaults to read only, as we don't want to
    cause problems.
    -n --no-root: Don't include the storm root on the default connection string.
    -j --jaas <jaas_file>: Include a jaas file that should be used when
    authenticating with ZK; defaults to the
    java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives the resources directory into a jar, uploads that jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
    e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
# MASKED: drpc function (lines 850-872)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
    stream-id is 'default';
    watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
| 850
| 872
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
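# Illustrative sketch (uses a throwaway temp dir): only *.jar entries are
# returned, joined onto the directory path; a path to a single file is
# handled as well.
def _example_get_jars_full():
    d = tempfile.mkdtemp()
    open(os.path.join(d, "a.jar"), "w").close()
    open(os.path.join(d, "b.txt"), "w").close()
    assert get_jars_full(d) == [os.path.join(d, "a.jar")]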
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a nonexistent path cannot raise
    # UnboundLocalError at the return below
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # format the message eagerly; passing output as a second argument to
        # RuntimeError would leave the %s placeholder unfilled
        raise RuntimeError("dependency handler returned non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
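# Note on the process model above (not in the original source): fork=True
# waits on a spawned child and returns its exit code, Windows shells out via
# subprocess.check_output, and everywhere else os.execvp *replaces* this
# Python process, so the trailing "return exit_code" never runs there.
def _example_exec_storm_class_modes():
    if not is_windows():
        assert hasattr(os, "execvp") and hasattr(os, "spawnvp")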
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    When you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, just as you would in a maven pom.
    Append exclusion artifacts to the artifact, separated by '^'.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from repositories other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because URLs may contain many other characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so that the dependency resolver can use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
    A complete example of these options: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the worker classpath, you may include the command line option `--storm-server-classpath`. Be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
    Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You will normally want to pass these options, since in many cases the data source for your SQL is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
        comma-separated list (requires admin access). A sketch for parsing one ACL entry follows this function.
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
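# Illustrative sketch (not part of the storm client) of how a single ACL entry
# in the [uo]:[username]:[rwa] form documented in blobstore() above could be
# decomposed; `parse_acl_entry` is a hypothetical name and is never called here.
def parse_acl_entry(entry):
    # "u:alice:rwa" -> ("u", "alice", "rwa"); "o::r" -> ("o", "", "r")
    kind, user, perms = entry.split(':')
    return kind, user, perms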
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN indefinitely
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
    Clears settings, resetting back to the original level. A sketch for parsing one -l value follows this function.
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
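# Illustrative sketch (not part of the storm client) of parsing one -l value in
# the [logger]=[level][:timeout] form documented in set_log_level() above;
# `parse_log_level_spec` is a hypothetical name and is never called here.
def parse_log_level_spec(spec):
    # "com.myapp=WARN" -> ("com.myapp", "WARN", None)
    # "ROOT=DEBUG:30"  -> ("ROOT", "DEBUG", 30)
    logger, _, level_part = spec.partition('=')
    level, _, timeout = level_part.partition(':')
    return logger, level, int(timeout) if timeout else None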
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
    -w --write: Allow for writes; defaults to read only, as we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
    authenticating with ZK; defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be (function, argument) pairs (a small pairing sketch follows this function).
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
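# Small pairing sketch (not part of the storm client) for the no-function case
# documented in drpcclient() above: the arguments alternate function, argument;
# `pair_drpc_args` is a hypothetical name and is never called here.
def pair_drpc_args(args):
    # ["f1", "a1", "f2", "a2"] -> [("f1", "a1"), ("f2", "a2")]
    return list(zip(args[0::2], args[1::2]))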
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
    stream-id is 'default';
    watch-item is 'emitted'.
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
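# Hypothetical usage sketch for parse_config_opts above (defined but never
# called): it peels off config/dependency options and leaves other tokens intact.
def _demo_parse_config_opts():
    argv = ["--jars", "a.jar,b.jar", "-c", "nimbus.host=nimbus1",
            "jar", "topo.jar", "com.Example"]
    configs, jars, artifacts, repos, maven_dir, purl, puser, ppass, rest = \
        parse_config_opts(argv)
    # configs == ["nimbus.host=nimbus1"], jars == ["a.jar", "b.jar"],
    # rest == ["jar", "topo.jar", "com.Example"]
    return rest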
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
dev_zookeeper
|
Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # decode bytes so the split below also works on python 3
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # default to an empty list so a missing path does not leave `ret` unbound
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
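# For example (paths illustrative): get_wildcard_dir("/opt/storm/lib") returns
# ["/opt/storm/lib/*"] when the directory exists, so the JVM expands all JARs
# in it, and [] when the path is missing.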
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
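# The ConfigValue subprocess above prints lines such as "VALUE: 2181" (the
# value shown is illustrative); confvalue() returns everything after "VALUE:".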
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
    try:
        out_dict = json.loads(output)
        return out_dict
    except ValueError:
        # %-format the message; passing output as a second argument to
        # RuntimeError would leave it out of the rendered message
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
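# The resolver's JSON output maps artifact coordinates to local file paths,
# e.g. (shape illustrative only):
#   {"redis.clients:jedis:2.9.0": "/home/user/.m2/repository/redis/clients/jedis/2.9.0/jedis-2.9.0.jar"}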
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
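# The command assembled above has the shape (illustrative):
#   java <jvmtype> -Ddaemon.name=<name> -Dstorm.options=... -Dstorm.home=... \
#        -Dstorm.log.dir=... -Djava.library.path=... -Dstorm.conf.file=... \
#        -cp <classpath> <jvmopts> <klass> <args>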
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments, but pointing to a local cluster.
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and their transitive dependencies, you can pass them to the --artifacts option as a comma-separated string.
    You can also exclude some dependencies, in the same way as in a maven pom.
    Please add exclusion artifacts as a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull the artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because a URL allows various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three proxy parameters:
    --proxyUrl: URL representation of the proxy ('http://host:port')
    --proxyUsername: username for the proxy if it requires basic auth
    --proxyPassword: password for the proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since your SQL usually needs a data source, which in many cases is external storage.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shut down
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
        comma-separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeat nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN indefinitely
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
    -w --write: Allow for writes; defaults to read only, as we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
    authenticating with ZK; defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives resources into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be (function, argument) pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
# MASKED: dev_zookeeper function (lines 874-891)
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
    stream-id is 'default';
    watch-item is 'emitted'.
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
| 874
| 891
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR is None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
    if storm_log_dir is None or storm_log_dir == "null":
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
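# Illustrative sketch (an abbreviated assumption, not literal output): the shape
# of the java invocation exec_storm_class assembles for a client command such as
# "storm version". The real call also resolves storm.log.dir and
# java.library.path from the configs before launching.
def _example_version_argv():
    return [
        JAVA_CMD, "-client",
        "-Ddaemon.name=",
        get_config_opts(),
        "-Dstorm.home=" + STORM_DIR,
        "-cp", get_classpath([CLUSTER_CONF_DIR], daemon=True),
        "org.apache.storm.utils.VersionInfo",
    ]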
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
    if debug_args is not None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    To ship other jars that are not included in the application jar, pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    To ship maven artifacts and their transitive dependencies, pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, as you would in a maven pom:
    append the exclusions to the artifact as a '^'-separated string.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka.
    When you need to pull artifacts from somewhere other than Maven Central, pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>"; '^' is used as the separator because URLs may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when there is no '.m2/repository' directory in the home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information so the dependency resolver uses a proxy if needed. There are three proxy parameters:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Be careful: this adds things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, the SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command.
    Refer to "help jar" to see how to use these options.
    You will normally want to pass them, since in many cases the data source for your sql is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
    list [KEY...] - lists blobs currently in the blob store
    cat [-f FILE] KEY - read a blob and then either write it to a file or to STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
    or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
    a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a
    comma-separated list (requires admin access).
    replication --read KEY - used to read the replication factor of the blob.
    replication --update --replication-factor NUMBER KEY, where NUMBER > 0 - used to update the
    replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN indefinitely
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better schedule based on the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component
    requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
    Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, the user needs
    admin rights on the node to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives the resources into a jar, uploads that jar to Nimbus, and executes the remaining arguments locally. Useful for non-JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
    if storm_log4j2_conf_dir is None or storm_log4j2_conf_dir == "null":
        storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
    elif not os.path.isabs(storm_log4j2_conf_dir):
        storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be function/argument pairs.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
    Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
    "storm.zookeeper.port" as its port. This is only intended for development/testing; the
    Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
    stream-id is 'default';
    watch-item is 'emitted'.
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
    if command is not None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
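# Illustrative sketch: how parse_config_opts splits a storm command line into
# dependency/config options and the remaining command arguments.
def _example_parse_config_opts():
    argv = ["-c", "nimbus.host=nimbus.mycompany.com", "--jars", "a.jar,b.jar", "list"]
    (configs, jars, artifacts, repos, m2_dir,
     proxy_url, proxy_user, proxy_pass, rest) = parse_config_opts(argv)
    # configs == ["nimbus.host=nimbus.mycompany.com"]
    # jars == ["a.jar", "b.jar"], rest == ["list"]; everything else is empty/None.
    return configs, jars, rest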
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
|
flatten_nested_df
|
Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The seperator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
|
"""Amazon Neptune Module."""
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
"""
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
"""
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
        The SPARQL query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> df = wr.neptune.execute_sparql(client, '''PREFIX foaf: <http://xmlns.com/foaf/0.1/>
    ... SELECT ?name
    ... WHERE { ?person foaf:name ?name . }''')
"""
data = client.read_sparql(query)
df = None
if "results" in data and "bindings" in data["results"]:
df = pd.DataFrame(data["results"]["bindings"])
        df = df.applymap(lambda x: x["value"])
else:
df = pd.DataFrame(data)
return df
def to_property_graph(
client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
    When writing to a property graph, DataFrames for vertices and edges must be written separately.
    DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
    If the ~id column does not exist, or the specified id does not exist or is empty, then a new vertex will be added.
    If no ~label column exists, an exception will be thrown.
    DataFrames for edges must have ~id, ~label, ~to, and ~from columns. If the ~id column does not exist,
    or the specified id does not exist or is empty, then a new edge will be added. If no ~label, ~to, or ~from column
    exists, an exception will be thrown.
    If you would like to save data using `single` cardinality, you can suffix the column header with (single) and
    set use_header_cardinality=True (default). e.g. a column named `name(single)` will save the `name` property
    with single cardinality. You can disable this by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> wr.neptune.to_property_graph(
... df=df
... )
"""
    # check if ~id and ~label columns exist and if not throw an error
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if "~id" in df.columns:
if "~label" in df.columns:
is_update_df = False
if "~to" in df.columns and "~from" in df.columns:
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue(
"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
)
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
# run the query
if index > 0 and index % batch_size == 0:
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g)
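# Usage sketch: the column layout to_property_graph expects for a vertex
# DataFrame. The "(single)" suffix on "age(single)" is the header-cardinality
# convention described in the docstring above; the commented-out endpoint is a
# placeholder, not a real cluster.
def _example_vertex_dataframe() -> pd.DataFrame:
    return pd.DataFrame(
        {
            "~id": ["v1", "v2"],
            "~label": ["person", "person"],
            "name": ["alice", "bob"],
            "age(single)": [34, 29],
        }
    )
# client = connect("my-neptune-endpoint", 8182)  # placeholder host
# to_property_graph(client, _example_vertex_dataframe())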
def to_rdf_graph(
client: NeptuneClient,
df: pd.DataFrame,
batch_size: int = 50,
subject_column: str = "s",
predicate_column: str = "p",
object_column: str = "o",
graph_column: str = "g",
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
    client : NeptuneClient
        instance of the neptune client to use
    df : pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    batch_size : int, optional
        The number of rows to save at a time. Defaults to 50
    subject_column : str, optional
        The column name in the dataframe for the subject. Defaults to 's'
    predicate_column : str, optional
        The column name in the dataframe for the predicate. Defaults to 'p'
    object_column : str, optional
        The column name in the dataframe for the object. Defaults to 'o'
    graph_column : str, optional
        The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> wr.neptune.to_rdf_graph(
... df=df
... )
"""
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if graph_column in df.columns:
is_quads = True
else:
raise exceptions.InvalidArgumentValue(
"""Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
(s, p, o) to be saved to Amazon Neptune"""
)
query = ""
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_quads:
insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
query = query + insert
else:
insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; """
query = query + insert
# run the query
if index > 0 and index % batch_size == 0:
res = client.write_sparql(query)
if res:
query = ""
return client.write_sparql(query)
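# Usage sketch: a triples DataFrame for to_rdf_graph using the default
# subject/predicate/object column names. Values are bare URIs because the
# function wraps each one in <...> when it builds the INSERT DATA statement.
def _example_triples_dataframe() -> pd.DataFrame:
    return pd.DataFrame(
        {
            "s": ["http://example.com/alice"],
            "p": ["http://xmlns.com/foaf/0.1/knows"],
            "o": ["http://example.com/bob"],
        }
    )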
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
"""Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
        The Neptune client object
"""
return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
for (column, value) in row.items():
if column not in ["~id", "~label", "~to", "~from"]:
# If the column header is specifying the cardinality then use it
if use_header_cardinality:
if column.lower().find("(single)") > 0 and pd.notna(value):
g = g.property(Cardinality.single, _get_column_name(column), value)
else:
g = _expand_properties(g, _get_column_name(column), value)
else:
# If not using header cardinality then use the default of set
g = _expand_properties(g, column, value)
return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
# If this is a list then expand it out into multiple property calls
if isinstance(value, list) and len(value) > 0:
for item in value:
g = g.property(Cardinality.set_, column, item)
elif pd.notna(value):
g = g.property(Cardinality.set_, column, value)
return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
g = g.V(str(row["~id"]))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_vertices(
g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_edges(
g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
g = (
g.V(str(row["~from"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
.addE(row["~label"])
.property(T.id, str(row["~id"]))
.to(
__.V(str(row["~to"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
)
)
g = _set_properties(g, use_header_cardinality, row)
return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
translator = Translator("g")
s = translator.translate(g.bytecode)
s = s.replace("Cardinality.", "") # hack to fix parser error for set cardinality
_logger.debug(s)
res = client.write_gremlin(s)
return res
def flatten_nested_df(
df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
"""Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The seperator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
"""
if seperator is None:
seperator = "_"
df = df.reset_index()
# search for list and map
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if len(list_columns) > 0 or len(dict_columns) > 0:
new_columns = []
for col in dict_columns:
# expand dictionaries horizontally
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{col}{seperator}")
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{seperator}")
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
        # check if there are still dict or list fields to flatten
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df
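# Usage sketch: a DataFrame with one dict column and one list column. The dict
# column explodes horizontally into info_a / info_b, and the list column is
# exploded vertically into one row per element.
def _example_flatten() -> pd.DataFrame:
    nested = pd.DataFrame(
        {
            "id": [1, 2],
            "info": [{"a": 1, "b": 2}, {"a": 3, "b": 4}],
            "tags": [["x", "y"], ["z"]],
        }
    )
    return flatten_nested_df(nested)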
| 356
| 416
|
"""Amazon Neptune Module."""
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
"""
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
"""
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The SPARQL query to execute
Returns
-------
pandas.DataFrame
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE {
?person foaf:name ?name .
"""
data = client.read_sparql(query)
df = None
if "results" in data and "bindings" in data["results"]:
df = pd.DataFrame(data["results"]["bindings"])
df = df.applymap(lambda x: x["value"])
else:
df = pd.DataFrame(data)
return df
def to_property_graph(
client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
If writing to a property graph then DataFrames for vertices and edges must be written separately.
DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
If the ~id column does not exist, the specified id does not exist, or is empty then a new vertex will be added.
If no ~label column exists an exception will be thrown.
DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist,
the specified id does not exist, or is empty then a new edge will be added. If no ~label, ~to, or ~from column
exists an exception will be thrown.
If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property
as single cardinality. You can disable this by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_property_graph(
... df=df
... )
"""
# check if ~id and ~label column exist and if not throw error
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if "~id" in df.columns:
if "~label" in df.columns:
is_update_df = False
if "~to" in df.columns and "~from" in df.columns:
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue(
"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
)
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
# run the query
if index > 0 and index % batch_size == 0:
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g)
def to_rdf_graph(
client: NeptuneClient,
df: pd.DataFrame,
batch_size: int = 50,
subject_column: str = "s",
predicate_column: str = "p",
object_column: str = "o",
graph_column: str = "g",
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
client (NeptuneClient) :
instance of the neptune client to use
df (pandas.DataFrame) :
    Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size (int, optional) :
    The number of rows to save at a time. Defaults to 50
subject_column (str, optional) :
    The column name in the dataframe for the subject. Defaults to 's'
predicate_column (str, optional) :
The column name in the dataframe for the predicate. Defaults to 'p'
object_column (str, optional) :
The column name in the dataframe for the object. Defaults to 'o'
graph_column (str, optional) :
The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_rdf_graph(
... df=df
... )
"""
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if graph_column in df.columns:
is_quads = True
else:
raise exceptions.InvalidArgumentValue(
"""Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
(s, p, o) to be saved to Amazon Neptune"""
)
query = ""
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_quads:
insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
query = query + insert
else:
insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; """
query = query + insert
# run the query
if index > 0 and index % batch_size == 0:
res = client.write_sparql(query)
if res:
query = ""
return client.write_sparql(query)
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
"""Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
    The Neptune client instance used to issue queries against the cluster
"""
return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
for (column, value) in row.items():
if column not in ["~id", "~label", "~to", "~from"]:
# If the column header is specifying the cardinality then use it
if use_header_cardinality:
if column.lower().find("(single)") > 0 and pd.notna(value):
g = g.property(Cardinality.single, _get_column_name(column), value)
else:
g = _expand_properties(g, _get_column_name(column), value)
else:
# If not using header cardinality then use the default of set
g = _expand_properties(g, column, value)
return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
# If this is a list then expand it out into multiple property calls
if isinstance(value, list) and len(value) > 0:
for item in value:
g = g.property(Cardinality.set_, column, item)
elif pd.notna(value):
g = g.property(Cardinality.set_, column, value)
return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
g = g.V(str(row["~id"]))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_vertices(
g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_edges(
g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
g = (
g.V(str(row["~from"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
.addE(row["~label"])
.property(T.id, str(row["~id"]))
.to(
__.V(str(row["~to"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
)
)
g = _set_properties(g, use_header_cardinality, row)
return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
translator = Translator("g")
s = translator.translate(g.bytecode)
s = s.replace("Cardinality.", "") # hack to fix parser error for set cardinality
_logger.debug(s)
res = client.write_gremlin(s)
return res
def flatten_nested_df(
df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
"""Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The separator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
"""
if seperator is None:
seperator = "_"
df = df.reset_index()
# search for list and map
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if len(list_columns) > 0 or len(dict_columns) > 0:
new_columns = []
for col in dict_columns:
# expand dictionaries horizontally
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{col}{seperator}")
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{seperator}")
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
# check if there are still dict or list fields to flatten
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df
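A minimal sketch of the vertex DataFrame shape that to_property_graph expects, following the docstring conventions above; the endpoint below is a placeholder, not a real cluster.
import pandas as pd
vertices = pd.DataFrame({
    "~id": ["v1", "v2"],
    "~label": ["person", "person"],
    "name(single)": ["ann", "bob"],               # saved with single cardinality when use_header_cardinality=True
    "hobbies": [["chess"], ["golf", "running"]],  # list values become one set-cardinality property per item
})
# client = connect("my-neptune-endpoint", 8182, iam_enabled=False)  # placeholder endpoint
# to_property_graph(client, vertices, batch_size=50)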
|
validate
|
Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None
|
from typing import List
import json
import hashlib
from time import time
from base64 import b64decode, b64encode
import ecdsa
from config import ECDSA_CURVE
from .constants import BLOCK_COUNT_FREEZE_WALLET_LOTTERY_AFTER_WIN, DEVELOPER_KEY
from .transaction import Transaction
from .exceptions import (
ValidationError,
NonLotteryMemberError,
WalletLotteryFreezeError,
GenesisIsNotValidError,
NonSequentialBlockIndexError,
NonMatchingHashError
)
class Block:
def __init__(
self,
index,
previous_hash,
timestamp=None,
forger=None,
transactions: List[Transaction] = None,
signature=None,
**kwargs,
):
"""
Create block
:param index: the block index at the chain (0 for the genesis block and so on)
:param previous_hash: hash of previous block
:param timestamp: block creation time
:param forger: public_address of forger wallet
:param transactions: list of transactions
:param signature: signature of the block hash by the forger
"""
if timestamp is None:
timestamp = time()
if transactions is None:
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature
@property
def forger_public_key(self) -> ecdsa.VerifyingKey:
forger_public_key_string = bytes.fromhex(self.forger)
return ecdsa.VerifyingKey.from_string(forger_public_key_string, curve=ECDSA_CURVE)
def _raw_data(self):
return {
"index": self.index,
"timestamp": self.timestamp,
"transactions": sorted([
transaction.to_dict() for transaction in self.transactions
], key=lambda t: t["nonce"]),
"previous_hash": self.previous_hash,
"forger": self.forger,
}
def hash(self):
"""
Calculate the block hash (block number, previous hash, transactions)
:return: String hash of block data (hex)
"""
block_dict = self._raw_data()
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def to_dict(self):
return {
**self._raw_data(),
"hash": self.hash(),
"signature": b64encode(self.signature).decode(),
}
def add_transaction(self, transaction: Transaction):
"""
Add transaction to block
:param transaction: Transaction object (see transaction.py)
:raise ValidationError: if transaction isn't valid.
:return: None
"""
self.transactions.append(transaction)
def is_signature_verified(self) -> bool:
"""
Check if block signature is valid
:return: bool
"""
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False
def create_signature(self, forger_private_address: str):
"""
Create block signature for this block
:param forger_private_address: hex(wallet private key)
:return: None
"""
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if forger_private_key.get_verifying_key() != self.forger_public_key:
raise ValueError("The forger is not the one signing")
self.signature = self.sign(forger_private_key)
def sign(self, forger_private_key: ecdsa.SigningKey):
return forger_private_key.sign(self.hash().encode())
# MASKED: validate function (lines 121-156)
# TODO: Add timestamp validation
@classmethod
def from_dict(
cls,
index: int,
previous_hash,
forger,
transactions: dict,
signature: str,
**kwargs,
):
transactions = list(map(lambda t: Transaction.from_dict(**t), transactions))
signature = b64decode(signature.encode())
return cls(
index=index,
previous_hash=previous_hash,
forger=forger,
transactions=transactions,
signature=signature,
**kwargs,
)
def __getitem__(self, item):
return getattr(self, item)
|
def validate(self, blockchain_state, is_test_net=False):
"""
Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None
"""
if self.index == 0 and blockchain_state.length == 0:
genesis_is_valid = self.forger == DEVELOPER_KEY and self.is_signature_verified()
if not genesis_is_valid:
raise GenesisIsNotValidError()
return
# TODO: check in production if hash is equal to hard coded hash
if self.index != blockchain_state.length:
raise NonSequentialBlockIndexError(
f"block index not sequential index: {self.index} chain: {blockchain_state.length}"
)
if self.previous_hash != blockchain_state.last_block_hash:
raise NonMatchingHashError("previous hash does not match previous block hash")
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if forger_wallet is None or forger_wallet.balance < 100:
if not is_test_net:
raise NonLotteryMemberError()
if not self.is_signature_verified():
raise ValidationError("invalid signature")
for transaction in self.transactions:
transaction.validate(
blockchain_state=blockchain_state, is_test_net=is_test_net
) # raises ValidationError
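A minimal sketch of driving Block.validate for the genesis case; StubState is a hypothetical object that only mirrors the attributes the method reads, and developer_private_key_hex stands in for a real hex-encoded key matching DEVELOPER_KEY.
class StubState:
    length = 0
    last_block_hash = None
    wallets = {}
genesis = Block(index=0, previous_hash=None, forger=DEVELOPER_KEY)
genesis.create_signature(developer_private_key_hex)  # hypothetical key material
genesis.validate(StubState(), is_test_net=True)      # raises GenesisIsNotValidError on failure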
| 121
| 156
|
from typing import List
import json
import hashlib
from time import time
from base64 import b64decode, b64encode
import ecdsa
from config import ECDSA_CURVE
from .constants import BLOCK_COUNT_FREEZE_WALLET_LOTTERY_AFTER_WIN, DEVELOPER_KEY
from .transaction import Transaction
from .exceptions import (
ValidationError,
NonLotteryMemberError,
WalletLotteryFreezeError,
GenesisIsNotValidError,
NonSequentialBlockIndexError,
NonMatchingHashError
)
class Block:
def __init__(
self,
index,
previous_hash,
timestamp=None,
forger=None,
transactions: List[Transaction] = None,
signature=None,
**kwargs,
):
"""
Create block
:param index: the block index at the chain (0 for the genesis block and so on)
:param previous_hash: hash of previous block
:param timestamp: block creation time
:param forger: public_address of forger wallet
:param transactions: list of transactions
:param signature: signature of the block hash by the forger
"""
if timestamp is None:
timestamp = time()
if transactions is None:
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature
@property
def forger_public_key(self) -> ecdsa.VerifyingKey:
forger_public_key_string = bytes.fromhex(self.forger)
return ecdsa.VerifyingKey.from_string(forger_public_key_string, curve=ECDSA_CURVE)
def _raw_data(self):
return {
"index": self.index,
"timestamp": self.timestamp,
"transactions": sorted([
transaction.to_dict() for transaction in self.transactions
], key=lambda t: t["nonce"]),
"previous_hash": self.previous_hash,
"forger": self.forger,
}
def hash(self):
"""
Calculate the block hash (block number, previous hash, transactions)
:return: String hash of block data (hex)
"""
block_dict = self._raw_data()
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def to_dict(self):
return {
**self._raw_data(),
"hash": self.hash(),
"signature": b64encode(self.signature).decode(),
}
def add_transaction(self, transaction: Transaction):
"""
Add transaction to block
:param transaction: Transaction object (see transaction.py)
:raise ValidationError: if transaction isn't valid.
:return: None
"""
self.transactions.append(transaction)
def is_signature_verified(self) -> bool:
"""
Check if block signature is valid
:return: bool
"""
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False
def create_signature(self, forger_private_address: str):
"""
Create block signature for this block
:param forger_private_address: hex(wallet private key)
:return: None
"""
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if forger_private_key.get_verifying_key() != self.forger_public_key:
raise ValueError("The forger is not the one signing")
self.signature = self.sign(forger_private_key)
def sign(self, forger_private_key: ecdsa.SigningKey):
return forger_private_key.sign(self.hash().encode())
def validate(self, blockchain_state, is_test_net=False):
"""
Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None
"""
if self.index == 0 and blockchain_state.length == 0:
genesis_is_valid = self.forger == DEVELOPER_KEY and self.is_signature_verified()
if not genesis_is_valid:
raise GenesisIsNotValidError()
return
# TODO: check in production if hash is equal to hard coded hash
if self.index != blockchain_state.length:
raise NonSequentialBlockIndexError(
f"block index not sequential index: {self.index} chain: {blockchain_state.length}"
)
if self.previous_hash != blockchain_state.last_block_hash:
raise NonMatchingHashError("previous hash does not match previous block hash")
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if forger_wallet is None or forger_wallet.balance < 100:
if not is_test_net:
raise NonLotteryMemberError()
if not self.is_signature_verified():
raise ValidationError("invalid signature")
for transaction in self.transactions:
transaction.validate(
blockchain_state=blockchain_state, is_test_net=is_test_net
) # raises ValidationError
# TODO: Add timestamp validation
@classmethod
def from_dict(
cls,
index: int,
previous_hash,
forger,
transactions: dict,
signature: str,
**kwargs,
):
transactions = list(map(lambda t: Transaction.from_dict(**t), transactions))
signature = b64decode(signature.encode())
return cls(
index=index,
previous_hash=previous_hash,
forger=forger,
transactions=transactions,
signature=signature,
**kwargs,
)
def __getitem__(self, item):
return getattr(self, item)
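For reference, a self-contained sketch of the sign/verify round trip the Block class relies on, using the ecdsa package directly; the curve here is an assumption standing in for config.ECDSA_CURVE.
import ecdsa
sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)  # assumed curve
vk = sk.get_verifying_key()
signature = sk.sign(b"block-hash-hex")                # what Block.sign() produces
assert vk.verify(signature, b"block-hash-hex")        # what is_signature_verified() checks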
|
__init__
|
AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
property_id (str): Defines the ID of the property for Omni Layer.
transaction_type (str): Defines the type of the transaction made.
created_by_transaction_id (str): The transaction ID used to create the token.
amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedOmnilayertoken(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'property_id': (str,), # noqa: E501
'transaction_type': (str,), # noqa: E501
'created_by_transaction_id': (str,), # noqa: E501
'amount': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'property_id': 'propertyId', # noqa: E501
'transaction_type': 'transactionType', # noqa: E501
'created_by_transaction_id': 'createdByTransactionId', # noqa: E501
'amount': 'amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
# MASKED: __init__ function (lines 107-186)
|
@convert_js_args_to_python_args
def __init__(self, name, property_id, transaction_type, created_by_transaction_id, amount, *args, **kwargs): # noqa: E501
"""AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
property_id (str): Defines the ID of the property for Omni Layer.
transaction_type (str): Defines the type of the transaction made.
created_by_transaction_id (str): The transaction ID used to create the token.
amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.property_id = property_id
self.transaction_type = transaction_type
self.created_by_transaction_id = created_by_transaction_id
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
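A usage sketch constructing the generated model with its five required fields; all values here are illustrative placeholders.
token = AddressTokensTransactionUnconfirmedOmnilayertoken(
    name="Omni Token",
    property_id="31",
    transaction_type="simple_send",
    created_by_transaction_id="b2b6d7...",  # placeholder transaction id
    amount="100.0",
)
print(token.amount)  # attributes round-trip through the model's _data_store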
| 107
| 186
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedOmnilayertoken(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'property_id': (str,), # noqa: E501
'transaction_type': (str,), # noqa: E501
'created_by_transaction_id': (str,), # noqa: E501
'amount': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'property_id': 'propertyId', # noqa: E501
'transaction_type': 'transactionType', # noqa: E501
'created_by_transaction_id': 'createdByTransactionId', # noqa: E501
'amount': 'amount', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, property_id, transaction_type, created_by_transaction_id, amount, *args, **kwargs): # noqa: E501
"""AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
property_id (str): Defines the ID of the property for Omni Layer.
transaction_type (str): Defines the type of the transaction made.
created_by_transaction_id (str): The transaction ID used to create the token.
amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.property_id = property_id
self.transaction_type = transaction_type
self.created_by_transaction_id = created_by_transaction_id
self.amount = amount
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
get_schema
|
A summary of information based on the results of executing a TestScript.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
action: The teardown action will only contain an operation.
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class TestReport_TeardownSchema:
"""
A summary of information based on the results of executing a TestScript.
"""
# noinspection PyDefaultArgument
# MASKED: get_schema function (lines 14-122)
|
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A summary of information based on the results of executing a TestScript.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
action: The teardown action will only contain an operation.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.testreport_action2 import (
TestReport_Action2Schema,
)
if (
max_recursion_limit
and nesting_list.count("TestReport_Teardown") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["TestReport_Teardown"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The teardown action will only contain an operation.
StructField(
"action",
ArrayType(
TestReport_Action2Schema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
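A usage sketch, assuming the spark_fhir_schemas package is installed: the result is a plain pyspark StructType, so it can be inspected or passed to spark.read.json(..., schema=...).
schema = TestReport_TeardownSchema.get_schema()
print([field.name for field in schema.fields])  # ['id', 'extension', 'action']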
| 14
| 122
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class TestReport_TeardownSchema:
"""
A summary of information based on the results of executing a TestScript.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A summary of information based on the results of executing a TestScript.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
action: The teardown action will only contain an operation.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.testreport_action2 import (
TestReport_Action2Schema,
)
if (
max_recursion_limit
and nesting_list.count("TestReport_Teardown") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["TestReport_Teardown"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The teardown action will only contain an operation.
StructField(
"action",
ArrayType(
TestReport_Action2Schema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
|
genseq2
|
generate a sequence library based on wtseq
@param mutations: list of tuples, [ (resid, library), (resid, library), ...]
@returns: list of sequences
|
#!/usr/bin/env python
"""
Generate sequences from a pdbfile and modify the sequences.
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function
import logging
import os
import sys
import time
import config
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
logger = logging.getLogger('prep.genseqs')
# ERROR/EXIT CODES
ERR_USAGE = 1
ERR_OUTPUTFOLDER_EXISTS = 2
ERR_TOPO_GENERATION_WT = 3
ERR_QPREP5_INEXISTENT = 4
ERR_MKTOP_INEXISTENT = 5
ERR_NO_BABEL = 6
# CONSTANTS
NLC = '\n'
# MASKED: genseq2 function (lines 39-76)
def combine(lib, pos):
"""generate combinations of up to 7.
@param lib: library
@param pos: positions to mutate
# TODO: implement in readable (recursively)
"""
numseqs = 1
for each in lib:
numseqs *= len(each)
logger.info('Generating %s %s', numseqs, 'sequences. Please wait.')
seqlib = []
logger.info('Library %s, Positions %s', lib, pos)
for every in lib[0]:
if len(pos) > 1:
for every2 in lib[1]:
if len(pos) > 2:
for every3 in lib[2]:
if len(pos) > 3:
for every4 in lib[3]:
if len(pos) > 4:
for every5 in lib[4]:
if len(pos) > 5:
for every6 in lib[5]:
if len(pos) > 6:
for every7 in lib[6]:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6,
every7])
else:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6])
else:
seqlib.append([every,
every2,
every3,
every4,
every5])
else:
seqlib.append([every, every2, every3, every4])
else:
seqlib.append([every, every2, every3])
else:
seqlib.append([every, every2])
else:
seqlib.append([every])
return seqlib
def gen_seqlib(sequence, pos, lib):
"""
Generates sequences, mutating at pos[x] to all as in lib[x]
Generates sequences, mutating at pos[x] if len(lib)==1,
the same lib will be used for all
Return sequences
"""
# is lib a string?
if isinstance(lib, str):
lib = [lib]
# when only 1 library is given, reuse it
if len(lib) == 1:
for _ in range(1, len(pos)):
    lib.append(lib[0])
if len(pos) != len(lib):
msg = 'Bad Input: Dimensions of pos and lib must be equal: '
msg += 'found: #pos: {0}, #lib {1}'.format(len(pos), len(lib))
raise Exception(msg)
seqlib = combine(lib, pos)
# insert combinations into sequence
sequences_1d = {}
for i in range(0, len(seqlib)):
nfa = list(sequence)
for j, posj in enumerate(pos):
if nfa[posj].upper() != seqlib[i][j].upper():
nfa[posj] = seqlib[i][j]
modseq = ''.join(nfa)
sequences_1d[modseq] = 1
return sequences_1d
def get_fasta(wtpdb):
"""Return fasta code of wtpdb"""
# preparations
from pyscwrl import babel_pdb_for_scwrl
babel_pdb_for_scwrl(wtpdb)
# read fasta
fasta = ''
for line in open('proper.fasta'):
line = line[:-1]
if line[0] == '>':
# fasta-comment, ignore line
continue
for char in line:
fasta += char.lower()
return fasta
def get_sequences(wtpdb, resids, library):
"""Return list of sequences for resids, created with library"""
print(wtpdb, resids)
# Get the fasta sequence from pdbfile
fasta = get_fasta(wtpdb)
posids = []
# position - ids start from 0 (not 1), so we have to convert
for resid in resids:
posids.append(int(resid)-1)
# generate sequences:
sequences = gen_seqlib(fasta, posids, [library])
return sequences
if __name__ == "__main__":
# Parse Command Line
LIB = config.SatLibs.ALL
def usage():
"""Print Usage and exit"""
print('')
print('Usage:')
print(' ' + sys.argv[0] + ' qprep-wt.pdb res1 [ res2 ...] ]')
print('')
sys.exit(ERR_USAGE)
def get_resnumbers(args):
"""Return residue-numbers as list-of-integers"""
resids = []
for resid in args:
try:
resids.append(int(resid))
except ValueError:
print('ValueError with ', resid, ' expected: Integer')
usage()
if len(resids) > 7:
print('FATAL:')
print('You ask me to mutate more than 7 residues at one time.')
print('This is NOT IMPLEMENTED... ...probably a BAD IDEA :')
print('This is a bad idea, because we grow with LIBRARY^{#RES}!')
print('In your case ', len(LIB), '^', len(LIB), '=',
len(LIB)**len(resids), '!')
usage()
return resids
START = time.time()
if len(sys.argv) < 3:
usage()
if len(sys.argv[2:]) > 7:
usage()
get_sequences(os.path.abspath(sys.argv[1]),
get_resnumbers(sys.argv[2:]), LIB)
print('time', round(time.time()-START, 2), 's')
|
def genseq2(wtseq, mutations, keepdupes=False):
""" generate a sequences library based of wtseq
@param: list of tupel, [ (resid, library), (resid, library), ...]
@returns: list of sequences
"""
def estimator(mutations):
est = 1
for mut in mutations:
lib = mut[1]
est *= (len(lib)+1)
return est
logger.info('will mutate wtseq %s and create about %s mutations',
wtseq, estimator(mutations))
seqo = list(wtseq)
sequences = [seqo]
while len(mutations) > 0:
newseqs = sequences[:]
res, lib = mutations.pop()
for seqo in sequences:
res = int(res)
if res < 1:
raise ValueError('Impossible: resid < 1!', res)
pos = res - 1
for aa in lib:
if len(aa) != 1:
raise ValueError('Impossible 1-letter aminoacid',
aa, 'in lib', lib)
seqn = seqo[:]
seqn[pos] = aa
if keepdupes or seqn not in newseqs:
newseqs.append(seqn)
sequences = newseqs
return sequences
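A worked example, assuming genseq2 is importable: mutating position 2 of a five-residue sequence with a two-letter library keeps the wild type and adds one variant per library entry.
seqs = genseq2("acdef", [(2, "gv")])
print(["".join(s) for s in seqs])  # ['acdef', 'agdef', 'avdef']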
| 39
| 76
|
#!/usr/bin/env python
"""
Generate sequences from a pdbfile and modify the sequences.
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function
import logging
import os
import sys
import time
import config
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
logger = logging.getLogger('prep.genseqs')
# ERROR/EXIT CODES
ERR_USAGE = 1
ERR_OUTPUTFOLDER_EXISTS = 2
ERR_TOPO_GENERATION_WT = 3
ERR_QPREP5_INEXISTENT = 4
ERR_MKTOP_INEXISTENT = 5
ERR_NO_BABEL = 6
# CONSTANTS
NLC = '\n'
def genseq2(wtseq, mutations, keepdupes=False):
""" generate a sequences library based of wtseq
@param: list of tupel, [ (resid, library), (resid, library), ...]
@returns: list of sequences
"""
def estimator(mutations):
est = 1
for mut in mutations:
lib = mut[1]
est *= (len(lib)+1)
return est
logger.info('will mutate wtseq %s and create about %s mutations',
wtseq, estimator(mutations))
seqo = list(wtseq)
sequences = [seqo]
while len(mutations) > 0:
newseqs = sequences[:]
res, lib = mutations.pop()
for seqo in sequences:
res = int(res)
if res < 1:
raise ValueError('Impossible: resid < 1!', res)
pos = res - 1
for aa in lib:
if len(aa) != 1:
raise ValueError('Impossible 1-letter aminoacid',
aa, 'in lib', lib)
seqn = seqo[:]
seqn[pos] = aa
if keepdupes or seqn not in newseqs:
newseqs.append(seqn)
sequences = newseqs
return sequences
def combine(lib, pos):
"""generate combinations of up to 7.
@param lib: library
@param pos: positions to mutate
# TODO: implement in readable (recursively)
"""
numseqs = 1
for each in lib:
numseqs *= len(each)
logger.info('Generating %s %s', numseqs, 'sequences. Please wait.')
seqlib = []
logger.info('Library %s, Positions %s', lib, pos)
for every in lib[0]:
if len(pos) > 1:
for every2 in lib[1]:
if len(pos) > 2:
for every3 in lib[2]:
if len(pos) > 3:
for every4 in lib[3]:
if len(pos) > 4:
for every5 in lib[4]:
if len(pos) > 5:
for every6 in lib[5]:
if len(pos) > 6:
for every7 in lib[6]:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6,
every7])
else:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6])
else:
seqlib.append([every,
every2,
every3,
every4,
every5])
else:
seqlib.append([every, every2, every3, every4])
else:
seqlib.append([every, every2, every3])
else:
seqlib.append([every, every2])
else:
seqlib.append([every])
return seqlib
def gen_seqlib(sequence, pos, lib):
"""
Generates sequences, mutating at pos[x] to all as in lib[x]
Generates sequences, mutating at pos[x] if len(lib)==1,
the same lib will be used for all
Return sequences
"""
# is lib a string?
if isinstance(lib, str):
lib = [lib]
# when only 1 library is given, reuse it
if len(lib) == 1:
for _ in range(1, len(pos)):
    lib.append(lib[0])
if len(pos) != len(lib):
msg = 'Bad Input: Dimensions of pos and lib must be equal: '
msg += 'found: #pos: {0}, #lib {1}'.format(len(pos), len(lib))
raise Exception(msg)
seqlib = combine(lib, pos)
# insert combinations into sequence
sequences_1d = {}
for i in range(0, len(seqlib)):
nfa = list(sequence)
for j, posj in enumerate(pos):
if nfa[posj].upper() != seqlib[i][j].upper():
nfa[posj] = seqlib[i][j]
modseq = ''.join(nfa)
sequences_1d[modseq] = 1
return sequences_1d
def get_fasta(wtpdb):
"""Return fasta code of wtpdb"""
# preparations
from pyscwrl import babel_pdb_for_scwrl
babel_pdb_for_scwrl(wtpdb)
# read fasta
fasta = ''
for line in open('proper.fasta'):
line = line[:-1]
if line[0] == '>':
# fasta-comment, ignore line
continue
for char in line:
fasta += char.lower()
return fasta
def get_sequences(wtpdb, resids, library):
"""Return list of sequences for resids, created with library"""
print(wtpdb, resids)
# Get the fasta sequence from pdbfile
fasta = get_fasta(wtpdb)
posids = []
# position - ids start from 0 (not 1), so we have to convert
for resid in resids:
posids.append(int(resid)-1)
# generate sequences:
sequences = gen_seqlib(fasta, posids, [library])
return sequences
if __name__ == "__main__":
# Parse Command Line
LIB = config.SatLibs.ALL
def usage():
"""Print Usage and exit"""
print('')
print('Usage:')
print(' ' + sys.argv[0] + ' qprep-wt.pdb res1 [ res2 ...] ]')
print('')
sys.exit(ERR_USAGE)
def get_resnumbers(args):
"""Return residue-numbers as list-of-integers"""
resids = []
for resid in args:
try:
resids.append(int(resid))
except ValueError:
print('ValueError with ', resid, ' expected: Integer')
usage()
if len(resids) > 7:
print('FATAL:')
print('You ask me to mutate more than 7 residues at one time.')
print('This is NOT IMPLEMENTED... ...probably a BAD IDEA :')
print('This is a bad idea, because we grow with LIBRARY^{#RES}!')
print('In your case ', len(LIB), '^', len(LIB), '=',
len(LIB)**len(resids), '!')
usage()
return resids
START = time.time()
if len(sys.argv) < 3:
usage()
if len(sys.argv[2:]) > 7:
usage()
get_sequences(os.path.abspath(sys.argv[1]),
get_resnumbers(sys.argv[2:]), LIB)
print('time', round(time.time()-START, 2), 's')
|
create
|
This method allows the user to create CVE exceptions.
Args:
package_name (str): The name of the vulnerable
package to be excepted.
package_version (str): The version number of the
vulnerable package.
scope (str): Possible values are server, group and all.
scope_id (str): If you pass the value server as scope, this field
will include server ID. If you pass the value group as scope,
this field will include group ID.
Returns:
str: ID of the newly-created cve exception
|
"""CveException Class"""
import cloudpassage.sanity as sanity
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
class CveExceptions(HaloEndpoint):
"""Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
Keyword args:
endpoint_version (int): Endpoint version override.
"""
object_name = "cve_exception"
objects_name = "cve_exceptions"
default_endpoint_version = 1
def endpoint(self):
"""Return the endpoint for API requests."""
return "/v{}/{}".format(self.endpoint_version, self.objects_name)
@classmethod
def object_key(cls):
"""Return the key used to pull the object from the json document."""
return cls.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return cls.objects_name
# MASKED: create function (lines 37-74)
def update(self, exception_id, **kwargs):
""" Update CVE Exceptions.
Args:
exception_id (str): Identifier for the CVE exception.
Keyword Args:
scope (str): Possible values are server, group and all.
group_id (str): The ID of the server group containing the server to
which this exception applies.
server_id (str): The ID of the server to which this exception
applies.
cve_entries : List of CVEs
Returns:
True if successful, throws exception otherwise.
"""
endpoint = "{}/{}".format(self.endpoint(), exception_id)
body = {"cve_exception": kwargs}
request = HttpHelper(self.session)
response = request.put(endpoint, body)
return response
# The following class needs to live on only in name, and should absorb the
# functionality of the current CveExceptions class.
class CveException(HaloEndpoint):
"""Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
"""
object_name = "cve_exception"
objects_name = "cve_exceptions"
default_endpoint_version = 1
def endpoint(self):
"""Return the endpoint for API requests."""
return "/v{}/{}".format(self.endpoint_version, self.objects_name)
@classmethod
def object_key(cls):
"""Return the key used to pull the object from the json document."""
return cls.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return cls.objects_name
|
def create(self, package_name, package_version, scope="all", scope_id=''):
"""This method allows user to create CVE exceptions.
Args:
package_name (str): The name of the vulnerable
package to be excepted.
package_version (str): The version number of the
vulnerable package.
scope (str): Possible values are server, group and all.
scope_id (str): If you pass the value server as scope, this field
should contain the server ID. If you pass the value group as scope,
this field should contain the group ID.
Returns:
str: ID of the newly created CVE exception.
"""
body_ref = {
"server": "server_id",
"group": "group_id"
}
params = {
"package_name": package_name,
"package_version": package_version,
"scope": scope
}
endpoint = self.endpoint()
if scope != "all":
sanity.validate_cve_exception_scope_id(scope_id)
scope_key = body_ref[scope]
params[scope_key] = scope_id
body = {"cve_exception": params}
request = HttpHelper(self.session)
response = request.post(endpoint, body)
return response["cve_exception"]["id"]
| 37
| 74
|
"""CveException Class"""
import cloudpassage.sanity as sanity
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
class CveExceptions(HaloEndpoint):
"""Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
Keyword args:
endpoint_version (int): Endpoint version override.
"""
object_name = "cve_exception"
objects_name = "cve_exceptions"
default_endpoint_version = 1
def endpoint(self):
"""Return the endpoint for API requests."""
return "/v{}/{}".format(self.endpoint_version, self.objects_name)
@classmethod
def object_key(cls):
"""Return the key used to pull the object from the json document."""
return cls.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return cls.objects_name
def create(self, package_name, package_version, scope="all", scope_id=''):
"""This method allows user to create CVE exceptions.
Args:
package_name (str): The name of the vulnerable
package to be excepted.
package_version (str): The version number of the
vulnerable package.
scope (str): Possible values are server, group and all.
scope_id (str): If you pass the value server as scope, this field
should contain the server ID. If you pass the value group as scope,
this field should contain the group ID.
Returns:
str: ID of the newly created CVE exception.
"""
body_ref = {
"server": "server_id",
"group": "group_id"
}
params = {
"package_name": package_name,
"package_version": package_version,
"scope": scope
}
endpoint = self.endpoint()
if scope != "all":
sanity.validate_cve_exception_scope_id(scope_id)
scope_key = body_ref[scope]
params[scope_key] = scope_id
body = {"cve_exception": params}
request = HttpHelper(self.session)
response = request.post(endpoint, body)
return response["cve_exception"]["id"]
def update(self, exception_id, **kwargs):
""" Update CVE Exceptions.
Args:
exception_id (str): Identifier for the CVE exception.
Keyword Args:
scope (str): Possible values are server, group and all.
group_id (str): The ID of the server group containing the server to
which this exception applies.
server_id (str): The ID of the server to which this exception
applies.
cve_entries (list): List of CVEs.
Returns:
True if successful, throws exception otherwise.
"""
endpoint = "{}/{}".format(self.endpoint(), exception_id)
body = {"cve_exception": kwargs}
request = HttpHelper(self.session)
response = request.put(endpoint, body)
return response
# The following class needs to live on only in name, and should absorb the
# functionality of the current CveExceptions class.
class CveException(HaloEndpoint):
"""Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
"""
object_name = "cve_exception"
objects_name = "cve_exceptions"
default_endpoint_version = 1
def endpoint(self):
"""Return the endpoint for API requests."""
return "/v{}/{}".format(self.endpoint_version, self.objects_name)
@classmethod
def object_key(cls):
"""Return the key used to pull the object from the json document."""
return cls.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return cls.objects_name
|
get_symbol
|
This is a helper function to get a company's full
name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
:param symbol: This is our stock symbol that we use
to query the API for the company's full name.
:return: Company full name
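Note that the helper returns `None` when the response contains no exact symbol match, so callers may want to guard for that (a minimal sketch; the Yahoo autocomplete endpoint used by the implementation may no longer be publicly available):
```python
name = get_symbol("AAPL")
if name is None:
    print("No exact match found for this ticker symbol")
```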
|
"""
Finance-specific data cleaning functions.
"""
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
# Dictionary of recognized World Bank countries and their abbreviations
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
"""Check that currency is in supported set."""
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
"""Check that world bank country is in supported set."""
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values() # noqa: PD011
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
"""Check that year is in world bank dataset years."""
if year < 1960:
raise ValueError("year value must be 1960 or later")
# @lru_cache(maxsize=32)
# def _convert_currency(
# api_key: str,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: Optional[date] = None,
# ) -> float:
# """
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
# url = "http://api.exchangeratesapi.io"
# if historical_date:
# check("historical_date", historical_date, [datetime, date])
# if isinstance(historical_date, datetime):
# if historical_date < datetime(1999, 1, 4):
# raise ValueError(
# "historical_date:datetime must be later than 1999-01-04!"
# )
# string_date = str(historical_date)[:10]
# else:
# if historical_date < date(1999, 1, 4):
# raise ValueError(
# "historical_date:date must be later than 1999-01-04!"
# )
# string_date = str(historical_date)
# url = url + "/%s" % string_date
# else:
# url = url + "/latest"
# _check_currency(from_currency)
# _check_currency(to_currency)
# payload = {
# # "base": from_currency,
# "symbols": to_currency,
# "access_key": api_key,
# }
# result = requests.get(url, params=payload)
# if result.status_code != 200:
# raise ConnectionError(
# "Exchange Rate API failed to receive a 200 "
# "response from the server. "
# "Please try again later."
# )
# currency_dict = json.loads(result.text)
# rate = currency_dict["rates"][to_currency]
# return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
api_key: str,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""Deprecated function."""
raise JanitorError(
"The `convert_currency` function has been temporarily disabled due to "
"exchangeratesapi.io disallowing free pinging of its API. "
"(Our tests started to fail due to this issue.) "
"There is no easy way around this problem "
"except to find a new API to call on."
"Please comment on issue #829 "
"(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
"if you know of an alternative API that we can call on, "
"otherwise the function will be removed in pyjanitor's 1.0 release."
)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
# for example, ``EXCHANGE_RATE_API_KEY``,
# and then passed into the function
# by calling ``os.getenv("EXCHANGE_RATE_API_KEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
"""
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
"""
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
[list of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
**Note**: This method mutates the original DataFrame.
Method chaining usage example:
>>> import pandas as pd
>>> import janitor.finance
>>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
>>> df
profit
0 100.1
1 200.2
2 300.3
3 400.4
4 500.5
>>> df.inflate_currency(
... column_name='profit',
... country='USA',
... currency_year=2015,
... to_year=2018,
... make_new_column=True
... )
profit profit_2018
0 100.1 106.050596
1 200.2 212.101191
2 300.3 318.151787
3 400.4 424.202382
4 500.5 530.252978
:param df: A pandas DataFrame.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank [List of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
:returns: The dataframe with inflated currency column.
"""
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
def convert_stock(stock_symbol: str) -> str:
"""
This function takes in a stock symbol as a parameter,
queries an API for the company's full name, and returns
it.
Functional usage example:
```python
import janitor.finance
janitor.finance.convert_stock("aapl")
```
:param stock_symbol: Stock ticker Symbol
:raises ConnectionError: Internet connection is not available
:returns: Full company name
"""
if is_connected("www.google.com"):
stock_symbol = stock_symbol.upper()
return get_symbol(stock_symbol)
else:
raise ConnectionError(
"Connection Error: Client Not Connected to Internet"
)
# MASKED: get_symbol function (lines 719-745)
|
def get_symbol(symbol: str):
"""
This is a helper function to get a company's full
name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
:param symbol: This is our stock symbol that we use
to query the API for the company's full name.
:return: Company full name
"""
result = requests.get(
"http://d.yimg.com/autoc."
+ "finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
).json()
for x in result["ResultSet"]["Result"]:
if x["symbol"] == symbol:
return x["name"]
else:
return None
| 719
| 745
|
"""
Finance-specific data cleaning functions.
"""
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
# Dictionary of recognized World Bank countries and their abbreviations
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
"""Check that currency is in supported set."""
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
"""Check that world bank country is in supported set."""
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values() # noqa: PD011
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
"""Check that year is in world bank dataset years."""
if year < 1960:
raise ValueError("year value must be 1960 or later")
# @lru_cache(maxsize=32)
# def _convert_currency(
# api_key: str,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: Optional[date] = None,
# ) -> float:
# """
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
# url = "http://api.exchangeratesapi.io"
# if historical_date:
# check("historical_date", historical_date, [datetime, date])
# if isinstance(historical_date, datetime):
# if historical_date < datetime(1999, 1, 4):
# raise ValueError(
# "historical_date:datetime must be later than 1999-01-04!"
# )
# string_date = str(historical_date)[:10]
# else:
# if historical_date < date(1999, 1, 4):
# raise ValueError(
# "historical_date:date must be later than 1999-01-04!"
# )
# string_date = str(historical_date)
# url = url + "/%s" % string_date
# else:
# url = url + "/latest"
# _check_currency(from_currency)
# _check_currency(to_currency)
# payload = {
# # "base": from_currency,
# "symbols": to_currency,
# "access_key": api_key,
# }
# result = requests.get(url, params=payload)
# if result.status_code != 200:
# raise ConnectionError(
# "Exchange Rate API failed to receive a 200 "
# "response from the server. "
# "Please try again later."
# )
# currency_dict = json.loads(result.text)
# rate = currency_dict["rates"][to_currency]
# return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
api_key: str,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""Deprecated function."""
raise JanitorError(
"The `convert_currency` function has been temporarily disabled due to "
"exchangeratesapi.io disallowing free pinging of its API. "
"(Our tests started to fail due to this issue.) "
"There is no easy way around this problem "
"except to find a new API to call on."
"Please comment on issue #829 "
"(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
"if you know of an alternative API that we can call on, "
"otherwise the function will be removed in pyjanitor's 1.0 release."
)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
# for example, ``EXCHANGE_RATE_API_KEY``,
# and then passed into the function
# by calling ``os.getenv("EXCHANGE_RATE_API_KEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
"""
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
"""
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
[list of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
**Note**: This method mutates the original DataFrame.
Method chaining usage example:
>>> import pandas as pd
>>> import janitor.finance
>>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
>>> df
profit
0 100.1
1 200.2
2 300.3
3 400.4
4 500.5
>>> df.inflate_currency(
... column_name='profit',
... country='USA',
... currency_year=2015,
... to_year=2018,
... make_new_column=True
... )
profit profit_2018
0 100.1 106.050596
1 200.2 212.101191
2 300.3 318.151787
3 400.4 424.202382
4 500.5 530.252978
:param df: A pandas DataFrame.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank [List of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
:returns: The dataframe with inflated currency column.
"""
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
def convert_stock(stock_symbol: str) -> str:
"""
This function takes in a stock symbol as a parameter,
queries an API for the company's full name, and returns
it.
Functional usage example:
```python
import janitor.finance
janitor.finance.convert_stock("aapl")
```
:param stock_symbol: Stock ticker Symbol
:raises ConnectionError: Internet connection is not available
:returns: Full company name
"""
if is_connected("www.google.com"):
stock_symbol = stock_symbol.upper()
return get_symbol(stock_symbol)
else:
raise ConnectionError(
"Connection Error: Client Not Connected to Internet"
)
def get_symbol(symbol: str):
"""
This is a helper function to get a company's full
name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
:param symbol: This is our stock symbol that we use
to query the API for the company's full name.
:return: Company full name
"""
result = requests.get(
"http://d.yimg.com/autoc."
+ "finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
).json()
for x in result["ResultSet"]["Result"]:
if x["symbol"] == symbol:
return x["name"]
else:
return None
|
datapackage_to_markdown
|
datapackage: datapackage schema as a dictionary
returns: UTF-8 encoded bytes with the Markdown documentation
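A minimal usage sketch, mirroring the module's own comment block (`datapackage.json` is a placeholder path; note the function returns UTF-8 encoded bytes):
```python
import json

datapackage = json.load(open("datapackage.json"))
md_bytes = datapackage_to_markdown(datapackage)  # UTF-8 encoded bytes
with open("datapackage.md", "wb") as output:
    output.write(md_bytes)
```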
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file is designed to be independent of Django so that it can
# be reused outside Django in the future (though some changes would
# be required). That's why it uses jinja2 as the template language
# instead of Django's template language.
#
# Example of use:
# Make sure the jinja2 template library is installed:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
# MASKED: datapackage_to_markdown function (lines 27-35)
def datapackage_to_pdf(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF; raises OSError or RuntimeError if the conversion fails.
"""
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
raise RuntimeError(f'Command {command_line} exited with non-zero return code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset licenses
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Description: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
|
def datapackage_to_markdown(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: UTF-8 encoded bytes with the Markdown documentation
"""
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
| 27
| 35
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file is designed to be independent of Django so that it can
# be reused outside Django in the future (though some changes would
# be required). That's why it uses jinja2 as the template language
# instead of Django's template language.
#
# Example of use:
# Make sure the jinja2 template library is installed:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
def datapackage_to_markdown(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: UTF-8 encoded bytes with the Markdown documentation
"""
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
def datapackage_to_pdf(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF; raises OSError or RuntimeError if the conversion fails.
"""
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
raise RuntimeError(f'Command {command_line} exited with non-zero return code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset licenses
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Description: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
|
datapackage_to_pdf
|
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF; raises OSError or RuntimeError if the conversion fails.
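A hedged usage sketch (requires the `pandoc` executable on PATH; file paths are placeholders):
```python
import json

datapackage = json.load(open("datapackage.json"))
pdf_bytes = datapackage_to_pdf(datapackage)  # raises OSError/RuntimeError on failure
with open("datapackage.pdf", "wb") as output:
    output.write(pdf_bytes)
```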
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file is designed to be independent of Django so that it can
# be reused outside Django in the future (though some changes would
# be required). That's why it uses jinja2 as the template language
# instead of Django's template language.
#
# Example of use:
# Make sure the jinja2 template library is installed:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
def datapackage_to_markdown(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: UTF-8 encoded bytes with the Markdown documentation
"""
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
# MASKED: datapackage_to_pdf function (lines 38-69)
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset licenses
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Description: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
|
def datapackage_to_pdf(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF; raises OSError or RuntimeError if the conversion fails.
"""
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
raise RuntimeError(f'Command {command_line} exited with non-zero return code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
| 38
| 69
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file is designed to be independent of Django so that it can
# be reused outside Django in the future (though some changes would
# be required). That's why it uses jinja2 as the template language
# instead of Django's template language.
#
# Example of use:
# Make sure the jinja2 template library is installed:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
def datapackage_to_markdown(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: UTF-8 encoded bytes with the Markdown documentation
"""
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
def datapackage_to_pdf(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF; raises OSError or RuntimeError if the conversion fails.
"""
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
raise RuntimeError(f'Command {command_line} exited with non-zero return code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset licenses
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Description: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
|
closest
|
Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
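The patterns above map directly onto template strings. A hedged rendering sketch (assumes a running Home Assistant instance bound to `hass`, a configured `zone.school` entity, and some `device_tracker` entities; `Template` and `async_render` are the actual helper APIs):
```python
from homeassistant.helpers.template import Template

tpl = Template(
    "{{ (states.device_tracker | closest('zone.school')).entity_id }}", hass
)
# tpl.async_render() (called from the event loop) returns the entity_id
# of the device_tracker nearest to zone.school.
```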
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
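# A minimal sketch of the delimiter check above (hypothetical inputs):
#   is_template_string("{{ states('sensor.temp') }}")  -> True  (matches "{{")
#   is_template_string("{% if x %}ok{% endif %}")      -> True  (matches "{%")
#   is_template_string("plain text")                   -> False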
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
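# A minimal sketch of the wrapper behaviour (hypothetical values):
#   str(TupleWrapper((1, 2)))                        -> "(1, 2)"
#   str(TupleWrapper((1, 2), render_result="1, 2"))  -> "1, 2"  (original render wins)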
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
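# Net effect of _freeze, summarized: templates that touch all states are rate
# limited to ALL_STATES_RATE_LIMIT, domain-scoped ones to
# DOMAIN_STATES_RATE_LIMIT, and entity-only templates get no rate limit but
# re-render only when one of their exact entity ids changes.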
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
            # If the literal_eval result is a string, keep the original
            # render by not returning right here: evaluating a string that
            # yields another string would alter the quoting and produce
            # unexpected output, so the raw render is used instead.
            # Complex and scientific values are also unexpected; filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
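# A minimal sketch of the parsing rules above (hypothetical render results):
#   "1"       -> 1         (numeric literal accepted by _IS_NUMERIC)
#   "1e3"     -> "1e3"     (scientific notation fails _IS_NUMERIC; raw render kept)
#   "'hi'"    -> "'hi'"    (literal_eval yields a str, so the raw render is kept)
#   "[1, 2]"  -> wrapped list carrying the original render string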
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
        # Only need to collect when the state is None; otherwise the
        # TemplateState wrapper collects on first access to the state properties.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
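# A minimal sketch (hypothetical inputs; relies on cv.boolean's coercion):
#   result_as_boolean("on")   -> True
#   result_as_boolean("off")  -> False
#   result_as_boolean(None)   -> False  (cv.boolean raises vol.Invalid)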
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
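# A minimal sketch (hypothetical group contents): expand(hass, "group.children")
# recursively resolves the group's ATTR_ENTITY_ID members and returns their
# states sorted by entity_id; entity ids, State objects and iterables of either
# are all accepted as arguments.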
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
# MASKED: closest function (lines 909-969)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
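# A minimal sketch of the reorder (hypothetical values): the piped value is the
# entity collection and must become the last positional argument, so
#   'group.children' | closest(23.456, 23.456)
# ends up calling closest(hass, 23.456, 23.456, 'group.children').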
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
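# A minimal sketch (hypothetical arguments):
#   distance(hass, 52.37, 4.89)                        -> from home to the point
#   distance(hass, "zone.school", "device_tracker.a")  -> between two located states
#   distance(hass, 52.37, 4.89, "zone.school")         -> point to located state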
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
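# A minimal sketch (hypothetical inputs):
#   forgiving_round("12.346", 2)           -> 12.35
#   forgiving_round("12.346", 1, "floor")  -> 12.3
#   forgiving_round("not-a-number")        -> "not-a-number"  (passed through)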
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
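# A minimal sketch (epoch seconds in, formatted string out):
#   timestamp_custom(0, "%Y-%m-%d", local=False)  -> "1970-01-01"
#   timestamp_custom("bad")                       -> "bad"  (passed through)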
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
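# A minimal sketch of the ordinal rules above:
#   ordinal(1) -> "1st", ordinal(2) -> "2nd", ordinal(11) -> "11th",
#   ordinal(23) -> "23rd", ordinal(112) -> "112th"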
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
    The age can be in seconds, minutes, hours, days, months or years. Only the
    biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
    be returned.
    If the date is in the future, or the input is not a datetime object, the
    input is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
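# A minimal sketch (hypothetical timestamps):
#   relative_time(dt_util.utcnow() - timedelta(hours=3))  -> e.g. "3 hours"
#   relative_time(dt_util.utcnow() + timedelta(hours=1))  -> the datetime, unchanged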
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as a context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
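        # hassfunction drops Jinja's context argument (args[0]) and injects the
        # captured hass instead, so wrapped helpers such as is_state('light.x', 'on')
        # always run against the live instance rather than a compile-time value.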
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
            # If there are any non-default keyword args, we do
            # not cache. In production we currently do not have
            # any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
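# Argument dispatch above, summarized (hypothetical calls):
#   closest(hass, entities)                  -> closest to home coordinates
#   closest(hass, entity_or_state, entities) -> closest to that state's location
#   closest(hass, lat, lon, entities)        -> closest to the given point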
| 909
| 969
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
            # If the literal_eval result is a string, keep the original
            # render by not returning right here: evaluating a string that
            # yields another string would alter the quoting and produce
            # unexpected output, so the raw render is used instead.
            # Complex and scientific values are also unexpected; filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only need to collect here if the state is None; otherwise collection
# happens on first access to the state properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
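# Illustrative only (not part of the original module): expected conversions,
# mirroring the truthy/falsy sets accepted by cv.boolean:
#   result_as_boolean("on")     -> True
#   result_as_boolean("Yes")    -> True
#   result_as_boolean("off")    -> False
#   result_as_boolean(None)     -> False  (vol.Invalid is swallowed)
#   result_as_boolean("maybe")  -> False  (not a recognized boolean)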
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
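# Illustrative only (not part of the original module): `expand` flattens group
# entities into their member states, assuming a `group.kitchen` group exists.
# It is exposed both as a template function and as a filter:
#   {{ expand('group.kitchen') | map(attribute='entity_id') | join(', ') }}
#   {{ ['group.kitchen', 'light.hall'] | expand | count }}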
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
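# Illustrative only (not part of the original module): forgiving_round coerces
# numeric strings and falls back to the input on failure:
#   forgiving_round("3.7")              -> 4     (precision 0 returns an int)
#   forgiving_round("3.14159", 2)       -> 3.14
#   forgiving_round("2.34", 1, "ceil")  -> 2.4
#   forgiving_round("not a number")     -> "not a number"  (unchanged)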
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
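# Illustrative only (not part of the original module):
#   strptime("2021-06-01 12:30:00", "%Y-%m-%d %H:%M:%S")
#       -> datetime(2021, 6, 1, 12, 30)
#   strptime("not a date", "%Y-%m-%d")  -> "not a date"  (unchanged)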
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
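# Illustrative only (not part of the original module): pick the n-th regex
# match; note that an out-of-range index raises IndexError rather than
# returning the input unchanged:
#   regex_findall_index("Flight from JFK to LHR", "([A-Z]{3})", index=1)
#       -> "LHR"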
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
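# Illustrative only (not part of the original module): round-trip example:
#   base64_encode("homeassistant")         -> "aG9tZWFzc2lzdGFudA=="
#   base64_decode("aG9tZWFzc2lzdGFudA==")  -> "homeassistant"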
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
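# Illustrative only (not part of the original module):
#   ordinal(1)   -> "1st"
#   ordinal(2)   -> "2nd"
#   ordinal(11)  -> "11th"  (11-13 always take "th")
#   ordinal(23)  -> "23rd"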
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in seconds, minutes, hours, days, months or years. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future, it will be returned unmodified.
If the input is not a datetime object, it will be returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
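# Illustrative only (not part of the original module), assuming "now" is
# 2021-06-03 12:00 local time:
#   relative_time(datetime(2021, 6, 1, 9, 0))  -> "2 days"
#   relative_time(datetime(2099, 1, 1))        -> datetime(2099, 1, 1)  (future: unchanged)
#   relative_time("not a datetime")            -> "not a datetime"  (unchanged)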
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# once at compile time with the value stored. The context itself
# can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keyword args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
relative_time
|
Take a datetime and return its "age" as a string.
The age can be in seconds, minutes, hours, days, months or years. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future, it will be returned unmodified.
If the input is not a datetime object, it will be returned unmodified.
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only need to collect here if the state is None; otherwise collection
# happens on first access to the state properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
# MASKED: relative_time function (lines 1325-1342)
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than evaluated
# once at compile time with the value stored. The context itself
# can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keyword args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in seconds, minutes, hours, days, months or years. Only the
biggest unit is considered; e.g. if it is 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future it is returned unchanged rather than
converted to an age.
If the input is not a datetime object, it is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
| 1,325
| 1,342
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
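# Illustrative behaviour of the wrappers above (a sketch, not verified output):
# each wrapper acts like its base type, but str() returns the original
# rendered text when one was recorded.
#   w = RESULT_WRAPPERS[list]([1, 2], render_result="[1, 2]")
#   w[0]    -> 1          (normal list access)
#   str(w)  -> "[1, 2]"   (original render preserved)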
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
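# Illustrative outcomes of the parsing rules above (hypothetical REPL session):
#   _parse_result("[1, 2]")  -> [1, 2]    (literal list, wrapped)
#   _parse_result("1.0")     -> 1.0       (matches _IS_NUMERIC, kept as float)
#   _parse_result("0x10")    -> "0x10"    (literal_eval gives 16, but the
#                                          non-"simple" number is filtered out)
#   _parse_result("hello")   -> "hello"   (literal_eval raises, string kept)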
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state.
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only need to collect if none, if not none collect first actual
# access to the state properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
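# Illustrative results, assuming cv.boolean follows the docstring above:
#   result_as_boolean("yes")  -> True
#   result_as_boolean("off")  -> False
#   result_as_boolean(None)   -> False   (vol.Invalid is swallowed)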
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
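# Sketch of template usage (the group and its members are hypothetical):
#   {{ expand('group.living_room') | map(attribute='entity_id') | join(', ') }}
# Group entities are recursively unpacked; plain entities come back as
# TemplateState objects sorted by entity_id.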
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
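# Illustrative call shapes (coordinates and entity ids are hypothetical):
#   distance(52.52, 13.40)                        # home -> a lat/lng point
#   distance('device_tracker.phone', 'zone.home') # between two located states
#   distance(52.52, 13.40, 'zone.home')           # point -> a located state
# The result is converted into the configured unit system.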
def is_state(hass: HomeAssistant, entity_id: str, state: State) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
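# Illustrative behaviour (a sketch; float rounding follows Python's round()):
#   forgiving_round("12.345", 1)        -> 12.3
#   forgiving_round(12.345, 1, "ceil")  -> 12.4
#   forgiving_round(3.2, 0, "half")     -> 3     (precision 0 returns an int)
#   forgiving_round("abc")              -> "abc" (unconvertible, returned as-is)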
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
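# Illustrative filter results (hypothetical REPL session):
#   timestamp_utc(0)                       -> "1970-01-01 00:00:00"
#   timestamp_custom(0, "%Y", local=False) -> "1970"
#   timestamp_local(0)                     -> the same instant, local time zone
#   timestamp_utc("bad input")             -> "bad input" (returned as-is)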
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
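# Illustrative regex filter behaviour (patterns are examples only):
#   regex_match("homeassistant", "home")             -> True  (match at start)
#   regex_search("homeassistant", "assist")          -> True  (match anywhere)
#   regex_replace("aaa", "a", "b")                   -> "bbb"
#   regex_findall_index("a1 b2 c3", r"\d", index=1)  -> "2"
# Note that regex_findall_index raises IndexError if the index has no match.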
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
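# Illustrative template usage:
#   {{ 5 | bitwise_and(3) }}  -> 1
#   {{ 5 | bitwise_or(3) }}   -> 7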
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
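# Illustrative round trip (hypothetical REPL session):
#   base64_encode("hello")     -> "aGVsbG8="
#   base64_decode("aGVsbG8=")  -> "hello"
# Both assume UTF-8 text; binary payloads are not supported here.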
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
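# Illustrative suffix selection (the 11-13 range always takes "th"):
#   ordinal(1)  -> "1st"    ordinal(2)  -> "2nd"
#   ordinal(11) -> "11th"   ordinal(23) -> "23rd"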
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in seconds, minutes, hours, days, months or years. Only the
biggest unit is considered; e.g. if it is 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future it is returned unchanged rather than
converted to an age.
If the input is not a datetime object, it is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
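# Illustrative behaviour (a sketch, assuming dt_util.get_age renders ages
# like "2 days"; that exact format is not verified here):
#   relative_time(dt_util.now() - timedelta(days=2))  -> "2 days"
#   relative_time(dt_util.now() + timedelta(days=1))  -> the datetime, unchanged
#   relative_time("not a datetime")                   -> "not a datetime"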
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
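# Illustrative result (note the return value is bytes, not str):
#   urlencode({"q": "home assistant", "n": 2})  -> b"q=home+assistant&n=2"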
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than evaluated
# once at compile time with the value stored. The context itself
# can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keyword args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
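# A note on the cache above: template_cache is a WeakValueDictionary, so a
# compiled entry lives only while some Template object still references it;
# once the last reference is dropped, the cache entry disappears on its own.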
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
async_render
|
Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
# MASKED: async_render function (lines 362-397)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
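# Usage sketch (hypothetical template; must be awaited from the event loop):
#
# tpl = Template("{{ states | list }}", hass)
# will_hang = await tpl.async_render_will_timeout(3) # True if rendering takes > 3s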
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only collect the entity here when the state is None; otherwise
# collection happens on the first actual access to the state
# properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
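# Illustrative behaviour (inputs hypothetical):
#
# result_as_boolean("on") # -> True
# result_as_boolean("false") # -> False
# result_as_boolean(None) # -> False
# result_as_boolean("maybe") # -> False (cv.boolean raises vol.Invalid)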
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
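# Usage sketch from a template (group id hypothetical); nested groups are
# flattened and the result is sorted by entity_id:
#
# {{ expand('group.living_room') | map(attribute='entity_id') | list }}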
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
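# Quick usage sketch (entity ids hypothetical); the docstring above lists
# every accepted call shape:
#
# {{ closest('zone.school', 'group.children').entity_id }}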
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
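# Usage sketch (coordinates and entity id hypothetical); a state object and
# a bare lat/lng pair can be mixed because locations are resolved one by one:
#
# {{ distance('device_tracker.adam', 52.3731, 4.8922) }}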
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
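# Illustrative behaviour (inputs hypothetical):
#
# forgiving_round("12.345", 1) # -> 12.3 (common rounding)
# forgiving_round(12.345, 1, "ceil") # -> 12.4
# forgiving_round("not a number") # -> "not a number" (passed through)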
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
# Calling an Undefined raises jinja2's UndefinedError
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
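# Illustrative behaviour (inputs hypothetical):
#
# ordinal(1) # -> "1st"
# ordinal(2) # -> "2nd"
# ordinal(11) # -> "11th" (11-13 always take "th")
# ordinal(23) # -> "23rd"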
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in seconds, minutes, hours, days, months or years. Only the
biggest unit is considered; e.g. if it is 2 days and 3 hours, "2 days" is
returned.
If the datetime is in the future, or the input is not a datetime object,
the input is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keyword args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
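# Compilations with only default arguments hit the per-environment
# weak-value cache above, so re-compiling an identical template string is
# cheap. A sketch of the effect (source string hypothetical):
#
# code = env.compile("{{ 1 + 1 }}")
# env.compile("{{ 1 + 1 }}") is code # -> True while "code" is still referenced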
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter that depends on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
| 362
| 397
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter that depends on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter that depends on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render by not returning right here. Evaluating a string that
# yields another string mangles quoting and produces unexpected
# output, so the original render is used instead of the
# evaluated one. Complex and scientific values are also
# unexpected; filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
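# Note on the guard above: _IS_NUMERIC only matches "simple" ints and
# floats, so a render like "1e3" survives literal_eval (-> 1000.0) but is
# still returned as the original string (illustrative, value hypothetical):
#
# Template("{{ '1e3' }}", hass).async_render() # -> "1e3", not 1000.0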
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only collect the entity here when the state is None; otherwise
# collection happens on the first actual access to the state
# properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
        # Support rounding methods like Jinja does
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
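# Illustrative doctest-style examples (editor's sketch, not part of the
# original module) showing the forgiving behaviour on bad input:
#
#   >>> forgiving_round("3.21", 0, "ceil")
#   4
#   >>> forgiving_round("3.74", 1)
#   3.7
#   >>> forgiving_round("not a number")
#   'not a number'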
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
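# Illustrative examples (editor's sketch); 1609459200 is 2021-01-01 00:00:00 UTC:
#
#   >>> timestamp_custom(1609459200, "%Y-%m-%d", local=False)
#   '2021-01-01'
#   >>> timestamp_custom("bogus")   # unconvertible input is passed through
#   'bogus'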
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
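# Illustrative example (editor's sketch): pick the second three-letter code
# from a string. Note that an out-of-range index raises IndexError:
#
#   >>> regex_findall_index("Flight from JFK to LHR", "([A-Z]{3})", index=1)
#   'LHR'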
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
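# Illustrative examples (editor's sketch) of the suffix table above:
#
#   >>> ordinal(1), ordinal(2), ordinal(3), ordinal(4)
#   ('1st', '2nd', '3rd', '4th')
#   >>> ordinal(11), ordinal(23)   # 11-13 always take "th"
#   ('11th', '23rd')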
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
    The age can be in seconds, minutes, hours, days, months or years. Only the
    biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
    be returned.
    If the date is in the future, or the input is not a datetime object, the
    input will be returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
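# Illustrative example (editor's sketch; exact output depends on the current
# time and on dt_util.get_age):
#
#   >>> relative_time(dt_util.utcnow() - timedelta(hours=2))
#   '2 hours'
#   >>> relative_time("not a datetime")   # passed through unmodified
#   'not a datetime'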
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
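# Illustrative example (editor's sketch). Note that the return value is
# UTF-8 encoded bytes, not a str:
#
#   >>> urlencode({"q": "home assistant"})
#   b'q=home+assistant'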
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
        # We mark these as context functions to ensure they are
        # evaluated fresh with every execution, rather than executed
        # at compile time with the value stored. The context itself
        # can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
            # If there are any non-default keyword args, we do
            # not cache. In production we currently do not have
            # any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
async_render_will_timeout
|
Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
        return [render_complex(item, variables, limited) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
            render_complex(key, variables, limited): render_complex(item, variables, limited)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
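# Illustrative examples (editor's sketch): only Jinja delimiters make a
# string count as a template:
#
#   >>> is_template_string("{{ states('sun.sun') }}")
#   True
#   >>> is_template_string("plain text")
#   False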
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
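# Illustrative example (editor's sketch): a wrapper behaves like the native
# type but remembers the exact string the template rendered to:
#
#   >>> wrapped = RESULT_WRAPPERS[list]([1, 2], render_result="[1, 2]")
#   >>> wrapped == [1, 2], str(wrapped)
#   (True, '[1, 2]')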
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
            # If the literal_eval result is a string, use the original
            # render by not returning right here. Evaluating strings
            # that result in strings strips the quotes, so to avoid
            # unexpected output we use the original render instead of
            # the evaluated one. Complex and scientific values are also
            # unexpected; filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
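    # Illustrative examples (editor's sketch) of the filtering above:
    #
    #   >>> Template("x")._parse_result("1.0")    # "simple" numeric -> float
    #   1.0
    #   >>> Template("x")._parse_result("0x10")   # not "simple" -> original str
    #   '0x10'
    #   >>> Template("x")._parse_result("'hi'")   # strings keep original render
    #   "'hi'"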
# MASKED: async_render_will_timeout function (lines 432-485)
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only need to collect if none, if not none collect first actual
# access to the state properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
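# Illustrative examples (editor's sketch), matching the docstring above:
#
#   >>> result_as_boolean("on"), result_as_boolean("enable")
#   (True, True)
#   >>> result_as_boolean("off"), result_as_boolean(None)
#   (False, False)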
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
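# Illustrative template usage (editor's sketch; the entity ID is made up).
# Groups are expanded recursively, while plain entity IDs resolve to their
# own state objects:
#
#   {{ expand('group.kitchen') | map(attribute='entity_id') | join(', ') }}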
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
        # Support rounding methods like Jinja does
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
    The age can be in seconds, minutes, hours, days, months or years. Only the
    biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
    be returned.
    If the date is in the future, or the input is not a datetime object, the
    input will be returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
        # We mark these as context functions to ensure they are
        # evaluated fresh with every execution, rather than executed
        # at compile time with the value stored. The context itself
        # can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
            # If there are any non-default keyword args, we do
            # not cache. In production we currently do not have
            # any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
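    # Illustrative usage (editor's sketch): from inside the event loop, probe
    # a suspect template with a 3 second budget before accepting it:
    #
    #   template = Template("{{ states | count }}", hass)
    #   render_too_slow = await template.async_render_will_timeout(3)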
| 432
| 485
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
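# Illustrative sketch (added for clarity, entity id hypothetical): any of the
# Jinja2 delimiters {{ ... }}, {% ... %} or {# ... #} makes a string a template.
assert is_template_string("{{ states('sensor.temperature') }}")
assert not is_template_string("just plain text")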
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
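# Illustrative sketch (added for clarity, _demo_wrapped is not part of the
# module): a wrapped container behaves like the underlying type, but str()
# returns the original render text it was created with.
_demo_wrapped = RESULT_WRAPPERS[list]([1, 2], render_result="[1, 2]")
assert _demo_wrapped == [1, 2] and str(_demo_wrapped) == "[1, 2]"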
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
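# Illustrative sketch (added for clarity, _demo_info is not part of the
# module): freezing a RenderInfo that tracked a whole domain picks the
# per-second rate limit and the domain/entity filter.
_demo_info = RenderInfo("demo template")
_demo_info.domains.add("sensor")
_demo_info._freeze()
assert _demo_info.rate_limit == DOMAIN_STATES_RATE_LIMIT
assert _demo_info.filter("sensor.kitchen") and not _demo_info.filter("light.desk")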
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only collect here when the state is None; otherwise collection happens
# on the first actual access to the state properties in the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
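# Illustrative sketch (added for clarity): the mapping described in the
# docstring above, assuming homeassistant's config_validation is importable.
assert result_as_boolean("on") is True
assert result_as_boolean("disable") is False
assert result_as_boolean(None) is False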
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
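# Illustrative usage sketch (hass-dependent, entity ids hypothetical):
#   expand(hass, "group.children", "light.kitchen")
# recursively unpacks group members via their entity_id attribute, skips
# unknown entities, and returns unique states sorted by entity_id.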
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
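# Illustrative usage sketch (hass-dependent, names hypothetical):
#   distance(hass, "zone.school", "device_tracker.paulus")  # between two states
#   distance(hass, 32.87, -117.22)                          # from home to a point
# A single resolved location is measured from home; two locations are measured
# against each other and converted to the configured unit system.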
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
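# Illustrative sketch (added for clarity): rounding methods and the forgiving
# fallback for non-numeric input.
assert forgiving_round("2.6") == 3                # common rounding, int result
assert forgiving_round("1.25", 1, "ceil") == 1.3  # ceil to one decimal place
assert forgiving_round("not a number") == "not a number"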
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
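# Illustrative sketch (added for clarity): epoch 0 rendered as UTC, and the
# forgiving fallback when the timestamp cannot be converted.
assert timestamp_utc(0) == "1970-01-01 00:00:00"
assert timestamp_utc("not a timestamp") == "not a timestamp"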
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
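# Illustrative sketch (added for clarity): pick one match out of all of them.
assert regex_findall_index("Flight from JFK to LHR", "([A-Z]{3})", index=1) == "LHR"
assert regex_findall_index("Flight from JFK to LHR", "([A-Z]{3})") == "JFK"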
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
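# Illustrative sketch (added for clarity): ordinal suffixes, including the
# 11-13 "th" exception.
assert ordinal(1) == "1st" and ordinal(2) == "2nd" and ordinal(3) == "3rd"
assert ordinal(11) == "11th" and ordinal(23) == "23rd"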
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in seconds, minutes, hours, days, months or years. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future, it is returned unmodified.
If the input is not a datetime object, it is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
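# Illustrative sketch (added for clarity): non-datetime input falls through
# unchanged; a past datetime yields e.g. "2 days" for 2 days and 3 hours ago.
assert relative_time("not a datetime") == "not a datetime"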
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time with the value stored. The context itself
# can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keywords args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
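# Illustrative usage sketch (hass-dependent, entity id hypothetical):
#   Template("{{ states('sensor.temperature') }}", hass).async_render()
# compiles through the shared TemplateEnvironment cached in hass.data and
# must run in the event loop; the limited and strict variants each cache
# their own environment under separate hass.data keys.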
|
async_render_with_possible_json_value
|
Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
# MASKED: async_render_with_possible_json_value function (lines 528-562)
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only collect here when the state is None; otherwise collection happens
# on first actual access to the state properties via the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
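# Illustrative usage (sketch): groups are flattened recursively and the member
# states come back sorted by entity_id:
#   {{ expand('group.kitchen') | map(attribute='entity_id') | join(', ') }}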
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
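# Illustrative usage (sketch): a single location measures from home, two
# locations measure between them; each may be a located state or a raw
# latitude/longitude pair:
#   {{ distance('device_tracker.phone') }}
#   {{ distance(52.1, 5.1, 'zone.home') }}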
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
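# Sketch of the rounding modes implemented above:
#   {{ 12.345 | round(1) }}            -> 12.3  (common rounding)
#   {{ 12.345 | round(1, 'ceil') }}    -> 12.4
#   {{ 12.345 | round(1, 'floor') }}   -> 12.3
#   {{ 12.6 | round(1, 'half') }}      -> 12.5  (nearest 0.5; precision unused)
# Non-numeric input is returned unchanged rather than raising.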
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
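# How the ordinal lookup works (sketch): the 10-element list maps the final
# digit to a suffix ("th", "st", "nd", "rd", then "th" for 4-9), with 11-13
# special-cased to "th":
#   1 -> "1st", 22 -> "22nd", 113 -> "113th"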
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future, it is returned unmodified.
If the input is not a datetime object, it is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keyword args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
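# Caching note: template_cache is a WeakValueDictionary keyed by the template
# source, so identical template strings share one compiled code object and
# entries are dropped automatically once nothing references them.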
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
|
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
| 528
| 562
|
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
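# These rate limits throttle re-renders for broad templates: ones that read
# all states re-render at most once a minute, ones that watch whole domains
# once a second; templates tracking specific entities are not rate limited.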
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
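# Sketch: render_complex walks nested config structures, rendering every
# Template leaf in place, e.g. (inside the event loop)
#   render_complex({"msg": Template("{{ 1 + 1 }}", hass)})  -> {"msg": 2}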
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
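# Sketch: only strings containing a Jinja delimiter count as templates;
# anything else is treated as static:
#   is_template_string("{{ 1 + 1 }}")  -> True
#   is_template_string("plain text")   -> False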
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
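# Sketch: the wrappers let a parsed native result remember its rendered text,
# so str() round-trips while the value still behaves like the native type:
#   w = RESULT_WRAPPERS[list]([1, 2], render_result="[1, 2]")
#   str(w) -> "[1, 2]"; w[0] -> 1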
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
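# Freeze sketch: once a render finishes, _freeze() picks the cheapest listener
# filter that is still correct (everything, per-domain, per-entity, or none)
# and applies the matching rate limit from the constants defined above.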
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, fall through and use the
# original render: evaluating a string that yields another string
# changes quoting and produces unexpected output, so the original
# render is preferred over the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
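# Sketch of the parse rules above:
#   "[1, 2]"  -> [1, 2]    (native list, original text kept in the wrapper)
#   "0x10"    -> "0x10"    (parses as an int but is not a "simple" numeric
#                           string, so the original render is kept)
#   "hello"   -> "hello"   (literal_eval fails; original render returned)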
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
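# Sketch: the render runs on a helper thread; if the awaited event times out,
# TimeoutError is injected into that thread and True is reported, while a real
# template failure surfaces as TemplateError instead.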
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
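# Note: a Template is pinned to one environment flavour on first compile
# (normal, limited, or strict), which is why the asserts above forbid mixing;
# each flavour is cached under its own hass.data key (_ENVIRONMENT*).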
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
# Only collect here when the state is None; otherwise collection happens
# on first actual access to the state properties via the state wrapper.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
If the datetime is in the future, it is returned unmodified.
If the input is not a datetime object, it is returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
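# Note: with the default non-strict environment an undefined variable logs a
# warning and renders as an empty string; strict templates use
# jinja2.StrictUndefined and raise instead.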
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time with the value stored. The context itself
# can be discarded; we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
# If there are any non-default keyword args, we do
# not cache. In production we currently do not have
# any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
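# Hypothetical usage sketch (not part of the original module): the sandboxed
# environment built above can be exercised directly with the pure filters,
# e.g. via the no-hass instance:
#     _NO_HASS_ENV.from_string("{{ 'a-b-c' | regex_replace('-', '_') }}").render()  # -> 'a_b_c'
#     _NO_HASS_ENV.from_string("{{ 21 | ordinal }}").render()  # -> '21st'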
|
distance_weighted_triplet_loss
|
distance weighted sampling + triplet loss
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
margin: Float, margin term in the loss function.
squared: Boolean, whether or not to square the triplet distances.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
triplet_loss: tf.float32 scalar
|
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Distance weighted sampling.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
# Arguments:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
a_indices: indices of anchors.
anchors: sampled anchor embeddings.
positives: sampled positive embeddings.
negatives: sampled negative embeddings.
"""
if not isinstance(neg_multiplier, int):
raise ValueError("`neg_multiplier` must be an integer.")
n = tf.size(labels)
if not isinstance(embeddings, tf.Tensor):
embeddings = tf.convert_to_tensor(embeddings)
d = embeddings.shape[1].value
distances = pairwise_distance(embeddings, squared=False)
# cut off to avoid high variance.
distances = tf.maximum(distances, high_var_threshold)
# subtract max(log(distance)) for stability
log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))
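# log_weights is -log q(dist) up to a constant: for points uniform on the unit
# (d-1)-sphere, the pairwise-distance density is
# q(dist) ~ dist**(d-2) * (1 - dist**2/4)**((d-3)/2), so weighting samples by
# 1/q flattens the sampled distance distribution (Wu et al., 2017).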
weights = tf.exp(log_weights - tf.reduce_max(log_weights))
# sample only negative examples by setting weights of the same class examples to 0.
lshape = tf.shape(labels)
assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
adjacency = tf.equal(labels, tf.transpose(labels))
adjacency_not = tf.logical_not(adjacency)
mask = tf.cast(adjacency_not, tf.float32)
# number of negative/positive samples to sampling per sample.
# For imbalanced data, this sampling method can be a sample weighted method.
adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))
m = tf.reduce_sum(adjacency_ex, axis=1)
if tf.reduce_min(m) == 0:
m = tf.diag(tf.cast(tf.equal(m,0), tf.int32))
adjacency_ex += m
k = tf.maximum(tf.reduce_max(m),1) * neg_multiplier
pos_weights = tf.cast(adjacency_ex, tf.float32)
weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)
weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)
# anchors indices
a_indices = tf.reshape(tf.range(n), (-1,1))
a_indices = tf.tile(a_indices, [1, k])
a_indices = tf.reshape(a_indices, (-1,))
# negative sampling
def neg_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)
n_indices = tf.reshape(n_indices, (-1,))
# positive sampling
def pos_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)
p_indices = tf.reshape(p_indices, (-1,))
anchors = tf.gather(embeddings, a_indices, name='gather_anchors')
positives = tf.gather(embeddings, p_indices, name='gather_pos')
negatives = tf.gather(embeddings, n_indices, name='gather_neg')
return a_indices, anchors, positives, negatives
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Computes the margin-based loss.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.
margin: Float, margin term in the loss function.
nu: float. Regularization parameter for beta.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
margin_based_loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
if isinstance(beta_in, (float,int)):
beta = beta_in
beta_reg_loss = 0.0
else:
if isinstance(beta_in, tf.Tensor):
assert tf.shape(beta_in).shape == 1
k = tf.size(a_indices) / tf.size(labels)
k = tf.cast(k, tf.int32)
beta = tf.reshape(beta_in, (-1, 1))
beta = tf.tile(beta, [1, k])
beta = tf.reshape(beta, (-1,))
beta_reg_loss = tf.reduce_sum(beta) * nu
else:
raise ValueError("`beta_in` must be one of [float, int, tf.Tensor].")
d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)
d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)
pos_loss = tf.maximum(margin + d_ap - beta, 0)
neg_loss = tf.maximum(margin + beta - d_an, 0)
pair_cnt = tf.cast(tf.size(a_indices), tf.float32)
# normalize based on the number of pairs
loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt
return loss
# MASKED: distance_weighted_triplet_loss function (lines 145-175)
|
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""distance weighted sampling + triplet loss
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
margin: Float, margin term in the loss function.
squared: Boolean, whether or not to square the triplet distances.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
triplet_loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)
d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)
if not squared:
d_ap = tf.sqrt(d_ap + 1e-16)
d_an = tf.sqrt(d_an + 1e-16)
loss = tf.maximum(d_ap - d_an + margin, 0)
loss = tf.reduce_mean(loss)
return loss
| 145
| 175
|
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Distance weighted sampling.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
# Arguments:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
a_indices: indices of anchors.
anchors: sampled anchor embeddings.
positives: sampled positive embeddings.
negatives: sampled negative embeddings.
"""
if not isinstance(neg_multiplier, int):
raise ValueError("`neg_multiplier` must be an integer.")
n = tf.size(labels)
if not isinstance(embeddings, tf.Tensor):
embeddings = tf.convert_to_tensor(embeddings)
d = embeddings.shape[1].value
distances = pairwise_distance(embeddings, squared=False)
# cut off to avoid high variance.
distances = tf.maximum(distances, high_var_threshold)
# subtract max(log(distance)) for stability
log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))
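# log_weights is -log q(dist) up to a constant: for points uniform on the unit
# (d-1)-sphere, the pairwise-distance density is
# q(dist) ~ dist**(d-2) * (1 - dist**2/4)**((d-3)/2), so weighting samples by
# 1/q flattens the sampled distance distribution (Wu et al., 2017).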
weights = tf.exp(log_weights - tf.reduce_max(log_weights))
# sample only negative examples by setting weights of the same class examples to 0.
lshape = tf.shape(labels)
assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
adjacency = tf.equal(labels, tf.transpose(labels))
adjacency_not = tf.logical_not(adjacency)
mask = tf.cast(adjacency_not, tf.float32)
# number of negative/positive samples to sampling per sample.
# For imbalanced data, this sampling method can be a sample weighted method.
adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))
m = tf.reduce_sum(adjacency_ex, axis=1)
if tf.reduce_min(m) == 0:
m = tf.diag(tf.cast(tf.equal(m,0), tf.int32))
adjacency_ex += m
k = tf.maximum(tf.reduce_max(m),1) * neg_multiplier
pos_weights = tf.cast(adjacency_ex, tf.float32)
weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)
weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)
# anchors indices
a_indices = tf.reshape(tf.range(n), (-1,1))
a_indices = tf.tile(a_indices, [1, k])
a_indices = tf.reshape(a_indices, (-1,))
# negative sampling
def neg_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)
n_indices = tf.reshape(n_indices, (-1,))
# positive sampling
def pos_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)
p_indices = tf.reshape(p_indices, (-1,))
anchors = tf.gather(embeddings, a_indices, name='gather_anchors')
positives = tf.gather(embeddings, p_indices, name='gather_pos')
negatives = tf.gather(embeddings, n_indices, name='gather_neg')
return a_indices, anchors, positives, negatives
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Computes the margin-based loss.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.
margin: Float, margin term in the loss function.
nu: float. Regularization parameter for beta.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
margin_based_loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
if isinstance(beta_in, (float,int)):
beta = beta_in
beta_reg_loss = 0.0
else:
if isinstance(beta_in, tf.Tensor):
assert tf.shape(beta_in).shape == 1
k = tf.size(a_indices) / tf.size(labels)
k = tf.cast(k, tf.int32)
beta = tf.reshape(beta_in, (-1, 1))
beta = tf.tile(beta, [1, k])
beta = tf.reshape(beta, (-1,))
beta_reg_loss = tf.reduce_sum(beta) * nu
else:
raise ValueError("`beta_in` must be one of [float, int, tf.Tensor].")
d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)
d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)
pos_loss = tf.maximum(margin + d_ap - beta, 0)
neg_loss = tf.maximum(margin + beta - d_an, 0)
pair_cnt = tf.cast(tf.size(a_indices), tf.float32)
# normalize based on the number of pairs
loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt
return loss
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""distance weighted sampling + triplet loss
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
margin: Float, margin term in the loss function.
squared: Boolean, whether or not to square the triplet distances.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarge the number of negative and positive samples.
Returns:
triplet_loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)
d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)
if not squared:
d_ap = tf.sqrt(d_ap + 1e-16)
d_an = tf.sqrt(d_an + 1e-16)
loss = tf.maximum(d_ap - d_an + margin, 0)
loss = tf.reduce_mean(loss)
return loss
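# Hypothetical usage sketch (not part of the original file): assumes a TF 1.x
# graph-mode runtime, since the code above depends on tf.contrib and
# tf.multinomial, both removed in TF 2.x.
import numpy as np
rng = np.random.RandomState(0)
emb = rng.randn(32, 128).astype("float32")
emb /= np.linalg.norm(emb, axis=1, keepdims=True)  # embeddings must be l2 normalized
labels = rng.randint(0, 8, size=32).astype("int32")
loss_op = distance_weighted_triplet_loss(tf.constant(labels), tf.constant(emb), margin=1.0)
with tf.Session() as sess:
    print(sess.run(loss_op))  # scalar tf.float32 triplet loss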
|
calculate_score_for_each_mood
|
Score images with Google's NIMA model
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
Compute the average image score for each mood post;
posts without images are filled with the mean score
:return:
|
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
"""
Generate the various datasets needed for training
"""
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
self.label_dict = {'1': '旅游与运动',
'2': '爱情与家庭',
'3': '学习与工作',
'4': '广告',
'5': '生活日常',
'6': '其他',
'7': '人生感悟'}
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
# MASKED: calculate_score_for_each_mood function (lines 46-71)
def calculate_send_time(self):
"""
Compute the send time of each mood post.
Bucketed into the following six types:
0. midnight: 0:00-4:00
1. early morning: 4:00-8:00
2. morning: 8:00-12:00
3. afternoon: 12:00-16:00
4. evening: 16:00-20:00
5. night: 20:00-24:00
:return:
"""
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
# four-hour time step
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
# train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
# using apply would exceed the QPS quota of the sentiment API
# sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
# self.mood_data_df['sentiment'] = sentiments
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
# data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
"""
Export the data to be classified
:return:
"""
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
data_df = data_df.loc[:, columns]
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
i +=1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except BaseException as e:
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
def change_vector_to_int(self, vector):
vector = re.findall(re.compile('[0-9]'), vector)
str_vector = "".join(vector)
sum = 0
length = len(str_vector)
for i in range(length):
sum += int(str_vector[i]) **(length - 1)
return sum
def remove_waste_emoji(text):
text = re.subn(re.compile(r'\[em\].*?\[/em\]'), '', text)[0]
text = re.subn(re.compile(r'@\{.*?\}'), '', text)[0]
return text
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
# train.calculate_score_for_each_mood()
# train.calculate_send_time()
# train.calculate_sentiment()
# train.export_df_after_clean()
train.export_train_text()
# train.export_classification_data()
# train.attach_image_object_for_each_mood()
# train.combine_text_type_data()
# train.combine_image_object()
# train.export_final_train_data()
|
def calculate_score_for_each_mood(self):
"""
Score images with Google's NIMA model
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
Compute the average image score for each mood post;
posts without images are filled with the mean score
:return:
"""
# NIMA model prediction results file
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score, inplace=True)
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
| 46
| 71
|
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
"""
Generate the various datasets needed for training
"""
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
self.label_dict = {'1': '旅游与运动',
'2': '爱情与家庭',
'3': '学习与工作',
'4': '广告',
'5': '生活日常',
'6': '其他',
'7': '人生感悟'}
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
def calculate_score_for_each_mood(self):
"""
Score images with Google's NIMA model
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
Compute the average image score for each mood post;
posts without images are filled with the mean score
:return:
"""
# NIMA model prediction results file
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score, inplace=True)
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def calculate_send_time(self):
"""
Compute the send time of each mood post.
Bucketed into the following six types:
0. midnight: 0:00-4:00
1. early morning: 4:00-8:00
2. morning: 8:00-12:00
3. afternoon: 12:00-16:00
4. evening: 16:00-20:00
5. night: 20:00-24:00
:return:
"""
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
# four-hour time step
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
# train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
# using apply would exceed the QPS quota of the sentiment API
# sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
# self.mood_data_df['sentiment'] = sentiments
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
# data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
"""
Export the data to be classified
:return:
"""
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
data_df = data_df.loc[:, columns]
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
i +=1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except BaseException as e:
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
def change_vector_to_int(self, vector):
vector = re.findall(re.compile('[0-9]'), vector)
str_vector = "".join(vector)
sum = 0
length = len(str_vector)
for i in range(length):
sum += int(str_vector[i]) **(length - 1)
return sum
def remove_waste_emoji(text):
text = re.subn(re.compile(r'\[em\].*?\[/em\]'), '', text)[0]
text = re.subn(re.compile(r'@\{.*?\}'), '', text)[0]
return text
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
# train.calculate_score_for_each_mood()
# train.calculate_send_time()
# train.calculate_sentiment()
# train.export_df_after_clean()
train.export_train_text()
# train.export_classification_data()
# train.attach_image_object_for_each_mood()
# train.combine_text_type_data()
# train.combine_image_object()
# train.export_final_train_data()
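# Hypothetical standalone sketch (not part of the original file): the four-hour
# bucketing used by calculate_send_time, without pandas. Buckets: 0 = 0-4h,
# 1 = 4-8h, 2 = 8-12h, 3 = 12-16h, 4 = 16-20h, 5 = 20-24h.
def time_state_of(hour, minute=0, second=0):
    seconds_since_midnight = hour * 3600 + minute * 60 + second
    return seconds_since_midnight // (60 * 60 * 4)
assert time_state_of(21, 30) == 5  # 21:30 falls in the "night" bucket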
|
calculate_send_time
|
Compute the send time of each mood post.
Bucketed into the following six types:
0. midnight: 0:00-4:00
1. early morning: 4:00-8:00
2. morning: 8:00-12:00
3. afternoon: 12:00-16:00
4. evening: 16:00-20:00
5. night: 20:00-24:00
:return:
|
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
"""
Generate the various datasets needed for training
"""
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
self.label_dict = {'1': '旅游与运动',
'2': '爱情与家庭',
'3': '学习与工作',
'4': '广告',
'5': '生活日常',
'6': '其他',
'7': '人生感悟'}
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
def calculate_score_for_each_mood(self):
"""
Score images with Google's NIMA model
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
Compute the average image score for each mood post;
posts without images are filled with the mean score
:return:
"""
# NIMA model prediction results file
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score, inplace=True)
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
# MASKED: calculate_send_time function (lines 73-92)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
# train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
# using apply would exceed the QPS quota of the sentiment API
# sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
# self.mood_data_df['sentiment'] = sentiments
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
# data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
"""
Export the data to be classified
:return:
"""
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
data_df = data_df.loc[:, columns]
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
i +=1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except BaseException as e:
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
def change_vector_to_int(self, vector):
vector = re.findall(re.compile('[0-9]'), vector)
str_vector = "".join(vector)
sum = 0
length = len(str_vector)
for i in range(length):
sum += int(str_vector[i]) **(length - 1)
return sum
def remove_waste_emoji(text):
text = re.subn(re.compile(r'\[em\].*?\[/em\]'), '', text)[0]
text = re.subn(re.compile(r'@\{.*?\}'), '', text)[0]
return text
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
# train.calculate_score_for_each_mood()
# train.calculate_send_time()
# train.calculate_sentiment()
# train.export_df_after_clean()
train.export_train_text()
# train.export_classification_data()
# train.attach_image_object_for_each_mood()
# train.combine_text_type_data()
# train.combine_image_object()
# train.export_final_train_data()
|
def calculate_send_time(self):
"""
Compute the send time of each mood post.
Bucketed into the following six types:
0. midnight: 0:00-4:00
1. early morning: 4:00-8:00
2. morning: 8:00-12:00
3. afternoon: 12:00-16:00
4. evening: 16:00-20:00
5. night: 20:00-24:00
:return:
"""
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
# four-hour time step
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
| 73
| 92
|
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
"""
Generate the various datasets needed for training
"""
def __init__(self, use_redis=False, debug=True, file_name_head=''):
QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
self.sc = SentimentClassify()
self.mood_data_df['score'] = '-1'
self.label_dict = {'1': '旅游与运动',
'2': '爱情与家庭',
'3': '学习与工作',
'4': '广告',
'5': '生活日常',
'6': '其他',
'7': '人生感悟'}
self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
def calculate_score_for_each_mood(self):
"""
Score images with Google's NIMA model
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
Compute the average image score for each mood post;
posts without images are filled with the mean score
:return:
"""
# NIMA model prediction results file
self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
self.image_score_dict = json.load(r)
self.image_score_df = pd.DataFrame(self.image_score_dict)
mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
tid_list = self.mood_data_df['tid'].values
for tid in tid_list:
scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
if len(scores) > 0:
self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
self.mood_data_df.fillna(mean_score, inplace=True)
print("score shape:", self.mood_data_df.shape)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def calculate_send_time(self):
"""
Compute the send time of each mood post.
Bucketed into the following six types:
0. midnight: 0:00-4:00
1. early morning: 4:00-8:00
2. morning: 8:00-12:00
3. afternoon: 12:00-16:00
4. evening: 16:00-20:00
5. night: 20:00-24:00
:return:
"""
day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
day_time_stamp = self.mood_data_df['time_stamp']
time_diff = day_time_stamp - day_begin_time
# four-hour time step
time_step = 60 * 60 * 4
time_state = time_diff.apply(lambda x: x // time_step)
self.mood_data_df['time_state'] = time_state
print('send time:', self.mood_data_df.shape)
def export_df_after_clean(self):
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
def export_train_text(self):
train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
train_text = train_text[['type', 'content']]
train_text.columns = ['Y', 'content']
train_text.fillna('空', inplace=True)
train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
train_text.fillna('空', inplace=True)
train_dataset = train_text.sample(frac=0.8)
val_dataset = train_text.sample(frac=0.3)
test_dataset = train_text.sample(frac=0.3)
self.print_label_dict(train_text)
self.print_label_dict(train_dataset)
self.print_label_dict(val_dataset)
self.print_label_dict(test_dataset)
train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
self.calculate_avg_length(train_text)
# train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
def calculate_avg_length(self, data_df):
num = data_df.shape[0]
content_list = data_df.content.sum()
print(len(content_list) / num)
def calculate_sentiment(self):
print("Begin to calculate sentiment...")
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
# using apply would exceed the QPS quota of the sentiment API
# sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
# self.mood_data_df['sentiment'] = sentiments
self.mood_data_df['sentiments'] = -1
for i in range(self.mood_data_df.shape[0]):
content = self.mood_data_df.loc[i, 'content']
sentiment = self.sc.get_sentiment_for_text(content)
print('content:', content, 'senti:', sentiment)
self.mood_data_df.loc[i, 'sentiments'] = sentiment
self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
try:
self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
except BaseException as e:
print(e)
self.mood_data_df.to_csv('after_sentiment.csv')
print("text sentiment:", self.mood_data_df.shape)
def print_label_dict(self, data_df):
for item in self.label_dict.values():
print(item, data_df.loc[data_df.Y == item, :].shape[0])
print('==========')
def re_do_sentiment(self, data_df):
# data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
for i in range(data_df.shape[0]):
sentiment = data_df.loc[i, 'sentiments']
content = data_df.loc[i, 'content']
if sentiment == -1:
content = content.replace('\u2207', '')
content = content.replace('\ue40c', '')
content = content.replace('\ue412', '')
content = content.replace('\ue056', '')
sentiment = self.sc.get_sentiment_for_text(str(content))
data_df.loc[i, 'sentiments'] = sentiment
data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
return data_df
def export_classification_data(self):
"""
Export the data to be classified
:return:
"""
data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
data_df = data[['content']]
data_df['Y'] = '旅游与运动'
data_df.fillna('空', inplace=True)
columns = ['Y', 'content']
data_df = data_df.loc[:, columns]
print(data_df.shape)
data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
def combine_text_type_data(self):
data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
print('mood_after_object_data:', data.shape)
label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
print('label data:', label.shape)
label_y = label['Y']
data['type'] = label_y
data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
def attach_image_object_for_each_mood(self):
with open('qq_big_image.json', 'r', encoding='utf-8') as r:
data = json.load(r)
with open('category.json', 'r', encoding='utf-8') as r:
category = json.load(r)
category_df = pd.DataFrame(category)
image_object_df = pd.DataFrame(
columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
'furniture',
'electronic', 'appliance', 'indoor'])
i = 0
for key, value in data.items():
tid = key.split('--')[0].split('/')[-1]
if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
image_object_df.loc[i, 'tid'] = tid
i += 1
for item in value:
item = item.split(' ')[0]
super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
if len(super_cate) > 0:
print(super_cate)
image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
image_object_df.fillna(0, inplace=True)
image_object_df['vector'] = 0
image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
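# Each tid row ends up as an indicator vector over the COCO-style supercategory
# columns; the slice 'person': also picks up the placeholder 'vector' column,
# which appears to be why export_final_train_data fills missing vectors with 13
# zeros. Illustrative row for a mood whose photos show a person and some food:
#   person=1, food=1, every other supercategory column 0,
#   vector=[1 0 0 0 0 0 0 1 0 0 0 0 0]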
def combine_image_object(self):
image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
try:
mood_data_df.drop(['vector'], axis=1, inplace=True)
except KeyError as e:  # the 'vector' column may not have been added yet
print(e)
image_object = image_object_df[['tid', 'vector']]
print(image_object_df.shape, mood_data_df.shape)
result = pd.merge(mood_data_df, image_object, on='tid', how='left')
print(result.shape)
result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
def export_final_train_data(self):
data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
train = train.loc[6:, :]
self.mean_score = self.image_score_df.loc[self.image_score_df['score'] != -1, 'score'].mean()
train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
train.type = train['type'].map(self.label_dict_reverse)
train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
train.sort_values(by='n_E', inplace=True, ascending=False)
train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
def change_neg_image_score(self, score):
if score == -1:
return self.mean_score
else:
return score
def change_vector_to_int(self, vector):
    # read the 0/1 object vector string (e.g. '[1 0 1 ...]') as a positional binary number
    digits = re.findall(re.compile('[0-9]'), vector)
    str_vector = "".join(digits)
    total = 0
    length = len(str_vector)
    for i in range(length):
        total += int(str_vector[i]) * 2 ** (length - 1 - i)
    return total
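# Illustrative check of the positional binary reading above:
#   '[1 0 1]' -> digits '101' -> 1*4 + 0*2 + 1*1 = 5 (same as int('101', 2))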
def remove_waste_emoji(text):
    # strip QQ-style [em]...[/em] emoji tags and @{...} mention payloads
    text = re.subn(re.compile(r'\[em\].*?\[/em\]'), '', text)[0]
    text = re.subn(re.compile(r'@\{.*?\}'), '', text)[0]
    return text
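# A quick, self-contained illustration of remove_waste_emoji; the sample string
# is made up but mirrors the QQ markup the two regexes target:
def _demo_remove_waste_emoji():
    sample = '今天很开心[em]e400846[/em]@{uin:123456,nick:somebody}'
    print(remove_waste_emoji(sample))  # -> '今天很开心'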
if __name__ == '__main__':
train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
# train.calculate_score_for_each_mood()
# train.calculate_send_time()
# train.calculate_sentiment()
# train.export_df_after_clean()
train.export_train_text()
# train.export_classification_data()
# train.attach_image_object_for_each_mood()
# train.combine_text_type_data()
# train.combine_image_object()
# train.export_final_train_data()
|
create_namespaced_job
|
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
    """
    create a Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_job(namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
    else:
        (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
        return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
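# Usage sketch for the wrapper above (assumptions: the published `kubernetes`
# client package and a cluster reachable via kubeconfig; the job name and image
# are placeholders):
#
#   from kubernetes import client, config
#   config.load_kube_config()
#   batch = client.BatchV1Api()
#   container = client.V1Container(
#       name='pi', image='perl',
#       command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(100)'])
#   template = client.V1PodTemplateSpec(
#       spec=client.V1PodSpec(restart_policy='Never', containers=[container]))
#   job = client.V1Job(metadata=client.V1ObjectMeta(name='pi'),
#                      spec=client.V1JobSpec(template=template))
#   created = batch.create_namespaced_job(namespace='default', body=job)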
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
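# Deletion sketch (assumption: `batch` is a configured BatchV1Api as in the
# creation example; Background propagation lets the garbage collector clean up
# the Job's pods):
#
#   batch.delete_namespaced_job(
#       name='pi', namespace='default',
#       body=client.V1DeleteOptions(propagation_policy='Background'))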
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
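# Paging sketch for the limit/continue contract documented above (assumption:
# the generated list models expose the continue token as `metadata._continue`):
#
#   page = batch.list_job_for_all_namespaces(limit=100)
#   while True:
#       for job in page.items:
#           print(job.metadata.namespace, job.metadata.name)
#       token = page.metadata._continue
#       if not token:
#           break
#       page = batch.list_job_for_all_namespaces(limit=100, _continue=token)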
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
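# Patch sketch (assumption: the server accepts a strategic-merge patch dict;
# the parallelism value is illustrative):
#
#   patched = batch.patch_namespaced_job(
#       name='pi', namespace='default',
#       body={'spec': {'parallelism': 2}})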
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
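    # Usage sketch (illustrative; assumes the configured `api` from the first
    # sketch above). The Job controller normally owns the /status subresource,
    # so a direct patch such as this hypothetical annotation update is unusual
    # and may be reconciled away:
    #
    #   patch = {"metadata": {"annotations": {"example.io/checked": "true"}}}
    #   job = api.patch_namespaced_job_status("pi", "default", patch)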
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
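    # Usage sketch (illustrative; assumes the configured `api` from the first
    # sketch). Reading a Job returns a deserialized V1Job:
    #
    #   job = api.read_namespaced_job("pi", "default")
    #   print(job.metadata.uid, job.spec.completions)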
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
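    # Usage sketch (illustrative; object names are placeholders). The /status
    # endpoint returns the full V1Job, so the counters live under `.status`:
    #
    #   status = api.read_namespaced_job_status("pi", "default").status
    #   print(status.active, status.succeeded, status.failed)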
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
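    # Usage sketch (illustrative). PUT requires the current resourceVersion for
    # conflict detection, so read-modify-replace is the usual pattern; note that
    # most V1JobSpec fields are immutable after creation, parallelism being one
    # of the few exceptions:
    #
    #   job = api.read_namespaced_job("pi", "default")
    #   job.spec.parallelism = 3
    #   job = api.replace_namespaced_job("pi", "default", job)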
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
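    # Usage sketch (illustrative). As with the PATCH variant, the controller
    # normally owns status, so replacing it by hand is rare; the read-first
    # step carries the resourceVersion the PUT needs:
    #
    #   job = api.read_namespaced_job_status("pi", "default")
    #   # ... mutate job.status here ...
    #   job = api.replace_namespaced_job_status("pi", "default", job)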
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
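    # Usage sketch (illustrative; mirrors the classic "pi" Job example, with
    # all object names as placeholders). Building a minimal V1Job and creating
    # it:
    #
    #   container = client.V1Container(
    #       name="pi", image="perl",
    #       command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
    #   spec = client.V1JobSpec(template=client.V1PodTemplateSpec(
    #       spec=client.V1PodSpec(restart_policy="Never",
    #                             containers=[container])))
    #   job = client.V1Job(api_version="batch/v1", kind="Job",
    #                      metadata=client.V1ObjectMeta(name="pi"), spec=spec)
    #   api.create_namespaced_job("default", job)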
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results: objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the 'next key'. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) if all requested objects are filtered out; clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit: no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results: objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the 'next key'. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) if all requested objects are filtered out; clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit: no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
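    # Usage sketch (illustrative; the label selector is a placeholder).
    # Deleting every Job in a namespace that matches a label:
    #
    #   api.delete_collection_namespaced_job(
    #       "default", label_selector="job-group=nightly")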
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used (a per-object value if one is specified).
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the 'orphan' finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used (a per-object value if one is specified).
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the 'orphan' finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
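    # Usage sketch (illustrative). Without an explicit propagation policy the
    # Job's Pods may be orphaned, so a foreground cascade is the common choice:
    #
    #   api.delete_namespaced_job(
    #       "pi", "default",
    #       body=client.V1DeleteOptions(propagation_policy="Foreground"))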
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
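    # Usage sketch (illustrative). Discovering what this API group serves:
    #
    #   for r in api.get_api_resources().resources:
    #       print(r.name, sorted(r.verbs))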
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results: objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the 'next key'. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) if all requested objects are filtered out; clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit: no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results: objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the 'next key'. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (down to zero items) if all requested objects are filtered out; clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit: no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client using limit to receive smaller chunks of a very large result can see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
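# A minimal pagination sketch for the limit/_continue pair documented above,
# assuming `api` is an already configured BatchV1Api instance. Note that the
# python model exposes the wire field `continue` as `metadata._continue`.
jobs, token = [], None
while True:
    page = api.list_job_for_all_namespaces(limit=50, _continue=token)
    jobs.extend(page.items)
    token = page.metadata._continue  # empty once the list is exhausted
    if not token:
        break
# A 410 ResourceExpired on a stale token means restarting the loop without
# _continue if a consistent list is required.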
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on the quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
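# The async_req pattern shared by every method in this class, as a sketch:
# the call returns immediately with a worker-thread handle, and .get()
# blocks for the deserialized result (a V1JobList here). Assumes `api` is a
# configured BatchV1Api.
thread = api.list_namespaced_job('default', async_req=True)
job_list = thread.get(timeout=30)  # raises if the request ultimately failed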
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on the quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
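# A small filtering sketch: label_selector narrows the list server-side, and
# timeout_seconds bounds the whole call. The label value is hypothetical and
# `api` is assumed to be a configured BatchV1Api.
batch = api.list_namespaced_job(
    'default',
    label_selector='app=nightly-report',
    timeout_seconds=10,
)
for job in batch.items:
    print(job.metadata.name)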
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
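# With this generated client, select_header_content_type typically picks the
# first entry of the Content-Type list above ('application/json-patch+json'),
# so a JSON-Patch list of operations is the safest body shape. A sketch; the
# job name and value are hypothetical:
patch = [{"op": "replace", "path": "/spec/parallelism", "value": 0}]
job = api.patch_namespaced_job('nightly-report', 'default', patch)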
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Whether the export should be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Whether this value should be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Whether the export should be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Whether this value should be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
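# Reading back a Job and inspecting its spec and status, as a sketch (the
# job name is hypothetical, `api` is a configured BatchV1Api):
job = api.read_namespaced_job('nightly-report', 'default')
print(job.spec.completions, job.status.succeeded, job.status.failed)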
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
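# A polling sketch against the status subresource: cheaper than re-reading
# the whole object when only completion matters. Names are hypothetical.
import time

def wait_for_job(api, name, namespace, interval=5):
    while True:
        status = api.read_namespaced_job_status(name, namespace).status
        if status.succeeded or status.failed:
            return status
        time.sleep(interval)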
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
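# replace is a full PUT, so the usual pattern is read-modify-write: the
# fetched object carries metadata.resource_version, and the server answers
# 409 Conflict if it has gone stale, in which case the whole cycle is
# retried. A sketch with a hypothetical job name:
job = api.read_namespaced_job('nightly-report', 'default')
job.spec.parallelism = 2
job = api.replace_namespaced_job('nightly-report', 'default', job)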
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
create_namespaced_job_with_http_info
|
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
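# A minimal construction sketch. load_kube_config lives in the top-level
# `kubernetes` package (an assumption about how this generated module is
# distributed); it fills in the default Configuration that a bare
# ApiClient() picks up.
from kubernetes import client, config

config.load_kube_config()      # or config.load_incluster_config() in-cluster
api = client.BatchV1Api()      # wraps a default ApiClient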
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
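# A creation sketch using the generated models (assumed importable from
# kubernetes.client in this distribution); the classic "compute pi" Job:
from kubernetes import client

job = client.V1Job(
    metadata=client.V1ObjectMeta(name='pi'),
    spec=client.V1JobSpec(
        backoff_limit=4,
        template=client.V1PodTemplateSpec(
            spec=client.V1PodSpec(
                restart_policy='Never',
                containers=[client.V1Container(
                    name='pi',
                    image='perl',
                    command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(100)'],
                )],
            ),
        ),
    ),
)
created = api.create_namespaced_job('default', job)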
# MASKED: create_namespaced_job_with_http_info function (lines 63-151)
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on the quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is the maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and it ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on the quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
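# Deleting every Job that matches a label in one round trip, as a sketch
# (the selector is hypothetical); the call returns a V1Status:
status = api.delete_collection_namespaced_job(
    'default', label_selector='app=nightly-report')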
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
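# A cascading-delete sketch: 'Foreground' removes the Job only after its
# Pods are gone, while 'Background' returns immediately and lets the garbage
# collector clean up. V1DeleteOptions is one of the generated models
# (assumed importable from kubernetes.client); the job name is hypothetical.
from kubernetes import client

api.delete_namespaced_job(
    'pi', 'default',
    body=client.V1DeleteOptions(propagation_policy='Foreground'),
)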
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
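    # Usage sketch (hypothetical values): deleting a single Job. Passing a
    # V1DeleteOptions body with propagation_policy='Background' asks the
    # garbage collector to also remove the Job's Pods after the Job itself
    # is gone.
    #
    #   api = client.BatchV1Api()
    #   status = api.delete_namespaced_job(
    #       name='pi', namespace='default',
    #       body=client.V1DeleteOptions(propagation_policy='Background'))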
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
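    # Usage sketch (hypothetical): discovering which resources the batch/v1
    # group serves.
    #
    #   api = client.BatchV1Api()
    #   resources = api.get_api_resources()
    #   print([r.name for r in resources.resources])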
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
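    # Usage sketch (hypothetical values): paging through Jobs cluster-wide.
    # The server returns an opaque continue token in the list metadata,
    # exposed on V1ListMeta as `_continue`, matching the `_continue` kwarg.
    #
    #   api = client.BatchV1Api()
    #   jobs = api.list_job_for_all_namespaces(limit=50)
    #   while True:
    #       for job in jobs.items:
    #           print(job.metadata.namespace, job.metadata.name)
    #       if not jobs.metadata._continue:
    #           break
    #       jobs = api.list_job_for_all_namespaces(
    #           limit=50, _continue=jobs.metadata._continue)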
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
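    # Usage sketch (hypothetical values): listing Jobs in one namespace,
    # narrowed by a label selector.
    #
    #   api = client.BatchV1Api()
    #   jobs = api.list_namespaced_job(
    #       namespace='default', label_selector='app=batch-demo')
    #   for job in jobs.items:
    #       print(job.metadata.name, job.status.succeeded)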
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
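    # Usage sketch (hypothetical values): a plain dict body is typically sent
    # as a merge-style patch, so only the listed fields are changed.
    #
    #   api = client.BatchV1Api()
    #   job = api.patch_namespaced_job(
    #       name='pi', namespace='default',
    #       body={'spec': {'parallelism': 2}})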
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
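    # Usage sketch (hypothetical values): patching only the status
    # subresource. This is normally written by controllers rather than by
    # end users.
    #
    #   api = client.BatchV1Api()
    #   job = api.patch_namespaced_job_status(
    #       name='pi', namespace='default',
    #       body={'status': {'active': 0}})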
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
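    # Usage sketch (hypothetical values): fetching a single Job.
    #
    #   api = client.BatchV1Api()
    #   job = api.read_namespaced_job(name='pi', namespace='default')
    #   print(job.spec.completions, job.status.succeeded)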
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
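    # Usage sketch (hypothetical values): polling the status subresource to
    # detect completion.
    #
    #   api = client.BatchV1Api()
    #   job = api.read_namespaced_job_status(name='pi', namespace='default')
    #   if (job.status.succeeded or 0) >= (job.spec.completions or 1):
    #       print('Job finished')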
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
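    # Usage sketch (hypothetical values): replace is a full read-modify-write.
    # The body must carry the current metadata.resource_version, otherwise the
    # server rejects the PUT with a conflict.
    #
    #   api = client.BatchV1Api()
    #   job = api.read_namespaced_job(name='pi', namespace='default')
    #   job.spec.parallelism = 2
    #   job = api.replace_namespaced_job(
    #       name='pi', namespace='default', body=job)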
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
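    # Usage sketch (hypothetical values): the same read-modify-write pattern,
    # but against the status subresource.
    #
    #   api = client.BatchV1Api()
    #   job = api.read_namespaced_job_status(name='pi', namespace='default')
    #   job.status.active = 0
    #   job = api.replace_namespaced_job_status(
    #       name='pi', namespace='default', body=job)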
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 63
| 151
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
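    # A minimal construction sketch, assuming the standard `kubernetes` package
    # layout in which this class is re-exported as kubernetes.client.BatchV1Api:
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()    # out-of-cluster; use load_incluster_config() in a pod
    #     batch = client.BatchV1Api()  # builds a default ApiClient from the loaded config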
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
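    # A hedged sketch of create_namespaced_job, assuming the `batch` client built
    # above; the "pi" Job manifest is illustrative only:
    #
    #     job = client.V1Job(
    #         metadata=client.V1ObjectMeta(name="pi"),
    #         spec=client.V1JobSpec(
    #             backoff_limit=4,
    #             template=client.V1PodTemplateSpec(
    #                 spec=client.V1PodSpec(
    #                     restart_policy="Never",
    #                     containers=[client.V1Container(
    #                         name="pi", image="perl",
    #                         command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])]))))
    #     created = batch.create_namespaced_job(namespace="default", body=job)
    #
    # With async_req=True the call instead returns a thread; thread.get() yields the V1Job.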
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
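    # A hedged sketch of delete_collection_namespaced_job, assuming the `batch`
    # client above; the label selector value is illustrative:
    #
    #     status = batch.delete_collection_namespaced_job(
    #         "default", label_selector="app=batch-demo")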
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer; the value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
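    # A hedged sketch of delete_namespaced_job; passing V1DeleteOptions with
    # propagation_policy="Foreground" also cascades deletion to the Job's Pods
    # (the Job name and namespace are assumptions):
    #
    #     batch.delete_namespaced_job(
    #         "pi", "default",
    #         body=client.V1DeleteOptions(propagation_policy="Foreground"))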
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer; the value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
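    # A hedged sketch of get_api_resources, which reports what the batch/v1
    # group serves:
    #
    #     resources = batch.get_api_resources()
    #     print([r.name for r in resources.resources])  # typically includes 'jobs' and 'jobs/status'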
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
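    # A hedged sketch of list_job_for_all_namespaces (the selector value is an
    # assumption):
    #
    #     jobs = batch.list_job_for_all_namespaces(label_selector="app=batch-demo")
    #     for j in jobs.items:
    #         print(j.metadata.namespace, j.metadata.name)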
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
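    # A hedged paging sketch for list_namespaced_job using the limit/_continue
    # mechanism documented above (namespace and page size are assumptions):
    #
    #     page = batch.list_namespaced_job("default", limit=50)
    #     while True:
    #         for j in page.items:
    #             print(j.metadata.name)
    #         token = page.metadata._continue
    #         if not token:
    #             break
    #         page = batch.list_namespaced_job("default", limit=50, _continue=token)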
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
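    # A hedged sketch of patch_namespaced_job; a plain dict body is serialized
    # with one of the patch content types selected below, and spec.parallelism
    # is a mutable Job field (names assumed):
    #
    #     patched = batch.patch_namespaced_job(
    #         "pi", "default", {"spec": {"parallelism": 2}})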
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
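    # A hedged sketch of patch_namespaced_job_status; the status subresource is
    # normally owned by the Job controller, so the field and value here are
    # purely illustrative:
    #
    #     batch.patch_namespaced_job_status(
    #         "pi", "default", {"status": {"succeeded": 1}})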
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
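    # Sketch only: Job status is normally written by the Job controller, so
    # patching it by hand is rare; shown purely to illustrate the call shape
    # against the /status subresource (same JSON-Patch body convention as the
    # patch example above):
    #
    #   patch = [{"op": "replace", "path": "/status/active", "value": 0}]
    #   batch_v1.patch_namespaced_job_status("pi", "default", patch)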
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
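    # Illustrative usage sketch (assumes a reachable cluster via kubeconfig and
    # a Job named "pi" in "default"; all names are placeholders):
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   batch_v1 = client.BatchV1Api()
    #   job = batch_v1.read_namespaced_job("pi", "default")
    #   print(job.status.succeeded, job.status.failed)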
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
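    # Illustrative sketch: replace is a full read-modify-write. Unlike patch it
    # sends the entire object, so a concurrent update can surface as a 409
    # Conflict via the stored resourceVersion. parallelism is one of the few
    # mutable Job spec fields:
    #
    #   job = batch_v1.read_namespaced_job("pi", "default")
    #   job.spec.parallelism = 2
    #   batch_v1.replace_namespaced_job("pi", "default", job)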
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
get_api_resources
|
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
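    # Illustrative sketch of building a minimal V1Job and creating it (the
    # model classes are the generated ones shipped with this package; the Job
    # name and image are placeholders):
    #
    #   container = client.V1Container(
    #       name="pi", image="perl",
    #       command=["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"])
    #   template = client.V1PodTemplateSpec(
    #       metadata=client.V1ObjectMeta(labels={"app": "pi"}),
    #       spec=client.V1PodSpec(containers=[container], restart_policy="Never"))
    #   job = client.V1Job(
    #       api_version="batch/v1", kind="Job",
    #       metadata=client.V1ObjectMeta(name="pi"),
    #       spec=client.V1JobSpec(template=template, backoff_limit=4))
    #   created = batch_v1.create_namespaced_job("default", job)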
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
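    # Illustrative sketch: removing every Job matching a label selector in one
    # request (selector and namespace are placeholders):
    #
    #   batch_v1.delete_collection_namespaced_job(
    #       "default", label_selector="app=pi")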
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
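    # Illustrative sketch: deleting a Job together with its Pods. Without a
    # propagation policy the Job's Pods may be orphaned; "Background" hands
    # them to the garbage collector:
    #
    #   batch_v1.delete_namespaced_job(
    #       "pi", "default",
    #       body=client.V1DeleteOptions(propagation_policy="Background"))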
    def get_api_resources(self, **kwargs):
        """
        get available resources
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: V1APIResourceList
        If the method is called asynchronously,
        returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_api_resources_with_http_info(**kwargs)
        else:
            (data) = self.get_api_resources_with_http_info(**kwargs)
            return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
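    # Illustrative sketch: discovering what the batch/v1 group serves.
    #
    #   resources = batch_v1.get_api_resources()
    #   for r in resources.resources:
    #       print(r.name, r.kind, r.namespaced)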
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
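    # Illustrative sketch: paging through Jobs cluster-wide using `limit` plus
    # the opaque continue token described above (exposed as `_continue` on both
    # the request and the returned list metadata):
    #
    #   kwargs = {"limit": 100}
    #   while True:
    #       page = batch_v1.list_job_for_all_namespaces(**kwargs)
    #       for job in page.items:
    #           print(job.metadata.namespace, job.metadata.name)
    #       if not page.metadata._continue:
    #           break
    #       kwargs["_continue"] = page.metadata._continue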
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
            data = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
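    # Usage sketch (illustrative only): streaming add/update/delete events for
    # Jobs in one namespace. `kubernetes.watch.Watch` is the usual wrapper
    # around the raw `watch=True` query parameter; the namespace value is an
    # assumption.
    #
    #     from kubernetes import client, config, watch
    #     config.load_kube_config()
    #     batch = client.BatchV1Api()
    #     w = watch.Watch()
    #     for event in w.stream(batch.list_namespaced_job, namespace='default',
    #                           timeout_seconds=60):
    #         print(event['type'], event['object'].metadata.name)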
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
            data = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
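    # Usage sketch (illustrative only): a merge-style patch that scales a
    # Job's parallelism, one of the few mutable Job spec fields. The dict body
    # is serialized with one of the patch Content-Types listed above;
    # name/namespace values are assumptions.
    #
    #     patch = {'spec': {'parallelism': 4}}
    #     job = batch.patch_namespaced_job('my-job', 'default', patch)
    #     print(job.spec.parallelism)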
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
            data = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
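    # Usage sketch (illustrative only): status is normally written by the Job
    # controller, so a manual status patch mostly makes sense for custom
    # controllers or tests. The field and names below are assumptions.
    #
    #     status_patch = {'status': {'active': 0}}
    #     job = batch.patch_namespaced_job_status('my-job', 'default', status_patch)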
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
            data = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
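    # Usage sketch (illustrative only): reading a Job and checking its
    # completion counters. Both counters may be None, hence the defaults;
    # names are assumptions.
    #
    #     job = batch.read_namespaced_job('my-job', 'default')
    #     done = (job.status.succeeded or 0) >= (job.spec.completions or 1)
    #     print('complete' if done else 'still running')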
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
            data = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
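    # Usage sketch (illustrative only): polling the status subresource until
    # the Job reports a terminal condition. The poll interval and names are
    # assumptions; a watch (see list_namespaced_job above) avoids polling.
    #
    #     import time
    #     while True:
    #         job = batch.read_namespaced_job_status('my-job', 'default')
    #         conditions = job.status.conditions or []
    #         if any(c.type in ('Complete', 'Failed') and c.status == 'True'
    #                for c in conditions):
    #             break
    #         time.sleep(5)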
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
            data = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
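    # Usage sketch (illustrative only): replace is a full read-modify-write.
    # The resourceVersion carried over from the read guards against lost
    # updates; the server answers 409 Conflict if it is stale. Names are
    # assumptions.
    #
    #     job = batch.read_namespaced_job('my-job', 'default')
    #     job.spec.parallelism = 2
    #     job = batch.replace_namespaced_job('my-job', 'default', job)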
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
            data = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
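    # Usage sketch (illustrative only): replacing the whole status object,
    # again usually only done by controllers. Mirrors the read/replace pair
    # above but against the /status subresource; names are assumptions.
    #
    #     job = batch.read_namespaced_job_status('my-job', 'default')
    #     job = batch.replace_namespaced_job_status('my-job', 'default', job)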
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
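    # Usage sketch (illustrative only): the api_client argument lets several
    # API groups share one configured client; with no argument a default
    # ApiClient (and its default Configuration) is used.
    #
    #     from kubernetes import client, config
    #     config.load_kube_config()          # or config.load_incluster_config()
    #     shared = client.ApiClient()
    #     batch = client.BatchV1Api(shared)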
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
            data = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
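    # Usage sketch (illustrative only): building a minimal V1Job and creating
    # it. The image, command, and names are assumptions.
    #
    #     from kubernetes import client
    #     container = client.V1Container(
    #         name='pi', image='perl',
    #         command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(200)'])
    #     template = client.V1PodTemplateSpec(
    #         spec=client.V1PodSpec(restart_policy='Never', containers=[container]))
    #     job = client.V1Job(metadata=client.V1ObjectMeta(name='pi'),
    #                        spec=client.V1JobSpec(template=template, backoff_limit=4))
    #     created = batch.create_namespaced_job('default', job)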
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
            data = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
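    # Usage sketch (illustrative only): deleting every Job that matches a
    # label selector in one call. Note this signature takes no propagation
    # policy, unlike the per-Job delete below. The selector value is an
    # assumption.
    #
    #     status = batch.delete_collection_namespaced_job(
    #         'default', label_selector='app=etl')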
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
            data = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
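# --- Example usage (illustrative sketch, not part of the generated client) ---
# Deleting a Job and cascading the delete to its Pods via propagationPolicy,
# as described in the delete_namespaced_job docstring above. Assumes a
# reachable cluster configured via kubeconfig; the Job name and namespace
# are hypothetical.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     status = batch.delete_namespaced_job(
#         name="example-job",
#         namespace="default",
#         body=client.V1DeleteOptions(propagation_policy="Background"),
#     )
#     print(status.status)  # a V1Status object is returned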
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
data = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
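# --- Example usage (illustrative sketch, not part of the generated client) ---
# Listing the resources served by the batch/v1 group, e.g. when probing a
# cluster's capabilities. Assumes kubeconfig-based access.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     for resource in batch.get_api_resources().resources:
#         print(resource.name, resource.kind, resource.namespaced)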
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
data = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
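# --- Example usage (illustrative sketch, not part of the generated client) ---
# Paging through Jobs across all namespaces with `limit` and `_continue`,
# following the chunking contract described in the docstring above. Assumes
# kubeconfig-based access.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     kwargs = {"limit": 50}
#     while True:
#         page = batch.list_job_for_all_namespaces(**kwargs)
#         for job in page.items:
#             print(job.metadata.namespace, job.metadata.name)
#         if not page.metadata._continue:
#             break
#         kwargs["_continue"] = page.metadata._continue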
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
data = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
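# --- Example usage (illustrative sketch, not part of the generated client) ---
# Filtering Jobs in a single namespace by label, then watching the same
# endpoint for changes with the client's watch helper. The namespace and
# label are hypothetical.
#
#     from kubernetes import client, config, watch
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     jobs = batch.list_namespaced_job("default", label_selector="app=batch-worker")
#     print(len(jobs.items), "matching jobs")
#
#     w = watch.Watch()
#     for event in w.stream(batch.list_namespaced_job, "default", timeout_seconds=60):
#         print(event["type"], event["object"].metadata.name)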
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
data = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
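# --- Example usage (illustrative sketch, not part of the generated client) ---
# Partially updating a Job: a plain dict passed as `body` is sent as a
# merge-style patch using one of the patch content types listed above. The
# label key/value are hypothetical.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     patch = {"metadata": {"labels": {"audited": "true"}}}
#     job = batch.patch_namespaced_job("example-job", "default", patch)
#     print(job.metadata.labels)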
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
data = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
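# --- Example usage (illustrative sketch, not part of the generated client) ---
# Patching the /status subresource directly is normally controller
# territory; the call shape is shown only for completeness.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     job = batch.patch_namespaced_job_status(
#         "example-job", "default", {"status": {"active": 0}})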
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
data = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
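# --- Example usage (illustrative sketch, not part of the generated client) ---
# Reading a Job and checking its completion counters, assuming the Job
# exists in the given namespace.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     job = batch.read_namespaced_job("example-job", "default")
#     done = (job.status.succeeded or 0) >= (job.spec.completions or 1)
#     print("completed" if done else "still running")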
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
data = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
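# --- Example usage (illustrative sketch, not part of the generated client) ---
# The /status endpoint returns the same V1Job shape; it is useful when RBAC
# only grants access to the status subresource.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     job = batch.read_namespaced_job_status("example-job", "default")
#     print(job.status.active, job.status.succeeded, job.status.failed)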
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
data = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
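# --- Example usage (illustrative sketch, not part of the generated client) ---
# Replace is a full read-modify-write: fetch the current object, change a
# mutable field (Job parallelism is one of the few), and PUT it back; the
# stored resourceVersion makes the server reject conflicting writes.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     job = batch.read_namespaced_job("example-job", "default")
#     job.spec.parallelism = 2
#     job = batch.replace_namespaced_job("example-job", "default", job)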
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
data = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
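# --- Example usage (illustrative sketch, not part of the generated client) ---
# Replacing the whole status is, like the status patch above, typically a
# controller operation; the call mirrors replace_namespaced_job but targets
# the /status path.
#
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     job = batch.read_namespaced_job_status("example-job", "default")
#     job = batch.replace_namespaced_job_status("example-job", "default", job)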
|
get_api_resources_with_http_info
|
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
data = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
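# Hedged example: deleting every Job matching a label in one call. The label
# "app=demo" is an assumption for illustration:
#
#   status = batch_v1.delete_collection_namespaced_job(
#       namespace="default", label_selector="app=demo")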
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
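# Hedged example: deleting a single Job and asking the garbage collector to
# remove its Pods as well. "demo-job" is a hypothetical name:
#
#   status = batch_v1.delete_namespaced_job(
#       name="demo-job", namespace="default",
#       body=client.V1DeleteOptions(propagation_policy="Background"))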
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
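# Hedged example: discovering the resources served under /apis/batch/v1:
#
#   resources = batch_v1.get_api_resources()
#   for r in resources.resources:
#       print(r.name, r.kind)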
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
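# Hedged example: paging through Jobs cluster-wide with `limit` and
# `_continue`, as described in the docstring above (a sketch only):
#
#   jobs = batch_v1.list_job_for_all_namespaces(limit=50)
#   while True:
#       for job in jobs.items:
#           print(job.metadata.namespace, job.metadata.name)
#       if not jobs.metadata._continue:
#           break
#       jobs = batch_v1.list_job_for_all_namespaces(
#           limit=50, _continue=jobs.metadata._continue)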
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
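# Hedged example: filtering Jobs within one namespace by label; the selector
# value is an assumption for illustration:
#
#   jobs = batch_v1.list_namespaced_job(
#       namespace="default", label_selector="app=demo")
#   print(len(jobs.items))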
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
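# Hedged example: a merge-style patch passed as a plain dict; here pausing
# further work by lowering parallelism (values are illustrative):
#
#   patch = {"spec": {"parallelism": 0}}
#   patched = batch_v1.patch_namespaced_job(
#       name="demo-job", namespace="default", body=patch)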
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
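# Hedged example: fetching a Job and inspecting its spec and status:
#
#   job = batch_v1.read_namespaced_job(name="demo-job", namespace="default")
#   print(job.spec.completions, job.status.succeeded)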
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
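
# --- Illustrative usage (not part of the generated client) ---
# A minimal read-modify-replace sketch against BatchV1Api, assuming a
# reachable cluster and a local kubeconfig; the job name, namespace, and
# parallelism value are placeholders. dry_run='All' asks the server to
# validate the update without persisting it.
if __name__ == '__main__':
    from kubernetes import client, config

    config.load_kube_config()  # reads the default kubeconfig
    api = client.BatchV1Api()

    job = api.read_namespaced_job('example-job', 'default')
    job.spec.parallelism = 2   # bump a mutable spec field
    api.replace_namespaced_job('example-job', 'default', job, dry_run='All')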
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
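    # Illustrative sketch (comments only): a minimal V1Job body for
    # create_namespaced_job, assuming the model classes shipped with this
    # client; the job name, image, and command are placeholders.
    #
    #     body = client.V1Job(
    #         metadata=client.V1ObjectMeta(name='example-job'),
    #         spec=client.V1JobSpec(
    #             template=client.V1PodTemplateSpec(
    #                 spec=client.V1PodSpec(
    #                     restart_policy='Never',
    #                     containers=[client.V1Container(
    #                         name='main',
    #                         image='busybox',
    #                         command=['sh', '-c', 'echo done'])]))))
    #     api.create_namespaced_job('default', body)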
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
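    # Illustrative sketch (comments only): the collection delete above is
    # typically scoped with a selector rather than run bare; the label
    # value is a placeholder.
    #
    #     api.delete_collection_namespaced_job(
    #         'default', label_selector='app=batch-demo')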
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. The value must be a non-negative integer; zero means delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Whether the dependent objects should be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
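    # Illustrative sketch (comments only): deleting a Job without a
    # propagation policy may orphan its Pods, so a cascading delete is
    # usually requested explicitly via the kwarg documented above:
    #
    #     api.delete_namespaced_job('example-job', 'default',
    #                               propagation_policy='Foreground')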
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
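    # Illustrative sketch (comments only): discovery via get_api_resources,
    # e.g. to confirm the batch/v1 group serves Jobs before using them:
    #
    #     resources = api.get_api_resources()  # V1APIResourceList
    #     assert any(r.kind == 'Job' for r in resources.resources)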
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
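    # Illustrative sketch (comments only): paging through a large result
    # set with limit/_continue, per the semantics documented above; the
    # page size of 50 is arbitrary. The continue token is only passed once
    # the server has returned one.
    #
    #     kwargs = {}
    #     while True:
    #         page = api.list_job_for_all_namespaces(limit=50, **kwargs)
    #         for job in page.items:
    #             print(job.metadata.namespace, job.metadata.name)
    #         if not page.metadata._continue:
    #             break
    #         kwargs['_continue'] = page.metadata._continue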
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
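# --- Editor's usage sketch (hedged; not part of the generated client) --------
# A minimal illustration of the limit/_continue pagination contract described
# in the docstrings above. Assumes a reachable cluster configured via
# kubeconfig; the namespace 'default' and page size 50 are illustrative
# values, not anything defined by this module.
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

jobs, token = [], None
while True:
    kwargs = {'limit': 50}
    if token:
        kwargs['_continue'] = token        # opaque, server-issued token
    page = batch_v1.list_namespaced_job('default', **kwargs)
    jobs.extend(page.items)
    token = page.metadata._continue        # empty/None once the list is exhausted
    if not token:
        break
print('fetched %d jobs' % len(jobs))
# ------------------------------------------------------------------------------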
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
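# --- Editor's usage sketch (hedged; not part of the generated client) --------
# One way a partial update might be issued through patch_namespaced_job. How
# a plain-dict body maps onto the accepted patch Content-Types varies across
# client versions, so treat this as a sketch; the job name 'pi', namespace
# 'default', and the label are illustrative.
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

patch = {'metadata': {'labels': {'reviewed': 'true'}}}
# dry_run='All' asks the server to validate without persisting (see docstring)
batch_v1.patch_namespaced_job('pi', 'default', patch, dry_run='All')
patched = batch_v1.patch_namespaced_job('pi', 'default', patch)
print(patched.metadata.labels)
# ------------------------------------------------------------------------------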
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
        :param bool export: Should this value be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
        :param bool export: Should this value be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
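# --- Editor's usage sketch (hedged; not part of the generated client) --------
# Synchronous and async_req variants of read_namespaced_job. The job name
# 'pi' and namespace 'default' are hypothetical; assumes kubeconfig access.
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

job = batch_v1.read_namespaced_job('pi', 'default')
print(job.status.succeeded, job.status.failed)

thread = batch_v1.read_namespaced_job('pi', 'default', async_req=True)
job = thread.get()                       # blocks until the pool worker finishes
# ------------------------------------------------------------------------------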
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
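# --- Editor's usage sketch (hedged; not part of the generated client) --------
# replace_namespaced_job sends a full object, so the usual pattern is
# read-modify-write, retrying on a 409 resourceVersion conflict. parallelism
# is one of the few Job spec fields that remains mutable after creation; the
# names below are illustrative.
from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()
batch_v1 = client.BatchV1Api()

job = batch_v1.read_namespaced_job('pi', 'default')
job.spec.parallelism = 2
try:
    batch_v1.replace_namespaced_job('pi', 'default', job)
except ApiException as exc:
    # 409 means the resourceVersion went stale; re-read and retry in real code
    if exc.status != 409:
        raise
# ------------------------------------------------------------------------------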
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
read_namespaced_job
|
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
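# --- Editor's usage sketch (hedged; not part of the generated client) --------
# A minimal V1Job built from the client models and submitted with
# create_namespaced_job; mirrors the familiar upstream "pi" example, with all
# names illustrative.
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

container = client.V1Container(
    name='pi', image='perl',
    command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(100)'])
template = client.V1PodTemplateSpec(
    metadata=client.V1ObjectMeta(labels={'app': 'pi'}),
    spec=client.V1PodSpec(containers=[container], restart_policy='Never'))
job = client.V1Job(
    api_version='batch/v1', kind='Job',
    metadata=client.V1ObjectMeta(name='pi'),
    spec=client.V1JobSpec(template=template, backoff_limit=4))
created = batch_v1.create_namespaced_job('default', job)
print('created job %s' % created.metadata.name)
# ------------------------------------------------------------------------------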
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, with no guarantee; - if set to non-zero, then the result is at least as fresh as the given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
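# --- Editor's usage sketch (hedged; not part of the generated client) --------
# delete_collection_namespaced_job removes every Job matching a selector in a
# single call; the label 'app=pi' and namespace 'default' are hypothetical.
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()

status = batch_v1.delete_collection_namespaced_job(
    'default', label_selector='app=pi')
print(status.status)                     # V1Status summarizing the operation
# ------------------------------------------------------------------------------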
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
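# Usage sketch (not part of the generated API): deleting a Job with
# foreground cascading so its Pods are removed too. The job name "pi" and
# namespace "default" are illustrative; assumes kubeconfig-based auth.
#
#     from kubernetes import client, config
#     config.load_kube_config()
#     batch = client.BatchV1Api()
#     status = batch.delete_namespaced_job(
#         name="pi",
#         namespace="default",
#         body=client.V1DeleteOptions(propagation_policy="Foreground"),
#     )
#     print(status.status)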
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
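# Usage sketch (illustrative, assumes a configured `batch` client as above):
# listing which resources the batch/v1 group serves.
#
#     resources = batch.get_api_resources()
#     print([r.name for r in resources.resources])  # e.g. ['jobs']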
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
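# Usage sketch (illustrative): listing Jobs across all namespaces, filtered
# by a hypothetical label.
#
#     jobs = batch.list_job_for_all_namespaces(label_selector="app=batch-worker")
#     for job in jobs.items:
#         print(job.metadata.namespace, job.metadata.name)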
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
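# Usage sketch (illustrative): paging through a large namespace with `limit`
# and the server-supplied `continue` token, per the docstring above. The
# generated V1ListMeta model exposes the token as `_continue`.
#
#     page = batch.list_namespaced_job("default", limit=50)
#     while True:
#         for job in page.items:
#             print(job.metadata.name)
#         token = page.metadata._continue
#         if not token:
#             break
#         page = batch.list_namespaced_job("default", limit=50, _continue=token)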
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
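# Usage sketch (illustrative): the `watch` parameter above is usually driven
# through the client's watch helper rather than passed directly.
#
#     from kubernetes import watch
#     w = watch.Watch()
#     for event in w.stream(batch.list_namespaced_job, "default",
#                           timeout_seconds=60):
#         print(event["type"], event["object"].metadata.name)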
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
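# Usage sketch (illustrative): this generated client negotiates
# 'application/json-patch+json' first among the accepted patch content types,
# so a JSON Patch document (an RFC 6902 list of operations) is the safest
# body shape here.
#
#     body = [{"op": "replace", "path": "/spec/parallelism", "value": 2}]
#     patched = batch.patch_namespaced_job("pi", "default", body)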
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
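# Usage sketch (illustrative): reading a Job and inspecting its status.
#
#     job = batch.read_namespaced_job("pi", "default")
#     print(job.status.succeeded, job.status.failed)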
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
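# Usage sketch (illustrative): replace is a read-modify-write of the whole
# object; the read supplies the current resourceVersion, and a stale version
# is rejected by the server with 409 Conflict.
#
#     job = batch.read_namespaced_job("pi", "default")
#     job.spec.parallelism = 3
#     replaced = batch.replace_namespaced_job("pi", "default", job)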
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
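# Usage sketch (illustrative): creating a minimal Job. The pi-computing
# container is the standard Kubernetes example manifest, not something this
# client requires; assumes `from kubernetes import client` and a configured
# `batch` instance.
#
#     job = client.V1Job(
#         metadata=client.V1ObjectMeta(name="pi"),
#         spec=client.V1JobSpec(
#             template=client.V1PodTemplateSpec(
#                 spec=client.V1PodSpec(
#                     restart_policy="Never",
#                     containers=[client.V1Container(
#                         name="pi",
#                         image="perl",
#                         command=["perl", "-Mbignum=bpi", "-wle",
#                                  "print bpi(2000)"])]))))
#     created = batch.create_namespaced_job("default", job)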
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
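    # Usage sketch (illustrative): deleting every Job in a namespace that
    # matches a label selector. `batch_v1` is assumed to be a configured
    # BatchV1Api instance as in the earlier example; the selector value is
    # hypothetical.
    #
    #   status = batch_v1.delete_collection_namespaced_job(
    #       "default", label_selector="app=nightly-batch")
    #   # `status` is a V1Status describing the outcome of the delete.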
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
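    # Usage sketch (illustrative): deleting a single Job and letting the
    # garbage collector remove its Pods in the background. Assumes a
    # configured `batch_v1` client and the `client` module import from the
    # earlier example; the job name is hypothetical.
    #
    #   status = batch_v1.delete_namespaced_job(
    #       "demo-job", "default",
    #       body=client.V1DeleteOptions(propagation_policy="Background"))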
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
        :param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
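    # Usage sketch (illustrative): discovering the resources served by the
    # batch/v1 group, e.g. to confirm that `jobs` is available. Assumes a
    # configured `batch_v1` client as in the earlier examples.
    #
    #   resources = batch_v1.get_api_resources()
    #   print([r.name for r in resources.resources])  # typically includes 'jobs'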
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
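    # Usage sketch (illustrative): paging through Jobs cluster-wide with
    # `limit` and `_continue`, as described in the docstring above. Assumes a
    # configured `batch_v1` client; the chunk size is arbitrary.
    #
    #   _continue = None
    #   while True:
    #       page = batch_v1.list_job_for_all_namespaces(
    #           limit=50, _continue=_continue)
    #       for job in page.items:
    #           print(job.metadata.namespace, job.metadata.name)
    #       _continue = page.metadata._continue
    #       if not _continue:
    #           break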
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
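    # Usage sketch (illustrative): watching Jobs in one namespace via the
    # shared watch helper. Assumes the upstream `kubernetes.watch` module is
    # available; this is a common client-side pattern, not part of the
    # generated API surface.
    #
    #   from kubernetes import watch
    #   w = watch.Watch()
    #   for event in w.stream(batch_v1.list_namespaced_job, "default",
    #                         timeout_seconds=30):
    #       print(event["type"], event["object"].metadata.name)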
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server-defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (possibly zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
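    # Usage sketch (illustrative): a strategic-merge patch that relabels a
    # Job. The plain dict body is serialized as the patch document; the job
    # name and label are hypothetical. Assumes a configured `batch_v1` client.
    #
    #   patched = batch_v1.patch_namespaced_job(
    #       "demo-job", "default",
    #       body={"metadata": {"labels": {"audited": "true"}}})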
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
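    # Usage sketch (illustrative): patching the status subresource. Status
    # writes are normally the job controller's responsibility, so treat this
    # as a sketch of the call shape rather than a recommended workflow; the
    # body shown is hypothetical.
    #
    #   patched = batch_v1.patch_namespaced_job_status(
    #       "demo-job", "default",
    #       body={"status": {"conditions": []}})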
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
        :param bool export: Should this value be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
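    # Usage sketch (illustrative): reading a Job and inspecting its
    # completion counts. Assumes a configured `batch_v1` client; the job
    # name is hypothetical.
    #
    #   job = batch_v1.read_namespaced_job("demo-job", "default")
    #   print("succeeded:", job.status.succeeded, "failed:", job.status.failed)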
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
        :param bool export: Should this value be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
data = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
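# --- Usage sketch (illustrative only). A common pattern is to poll the
# /status subresource until the Job reports success or failure; `batch` is the
# client from the earlier sketch and the names are placeholder assumptions:
#
#   import time
#   job = batch.read_namespaced_job_status(name="pi", namespace="default")
#   while not (job.status.succeeded or job.status.failed):
#       time.sleep(5)
#       job = batch.read_namespaced_job_status(name="pi", namespace="default")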
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
data = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
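# --- Usage sketch (illustrative only). PUT replaces the whole object, so the
# usual flow is read-modify-write; the server rejects the replace with a 409
# Conflict if metadata.resource_version is stale. Placeholder names assumed:
#
#   job = batch.read_namespaced_job(name="pi", namespace="default")
#   job.metadata.labels = {"tier": "batch"}  # labels stay mutable; much of a
#                                            # Job's spec is immutable once created
#   job = batch.replace_namespaced_job(name="pi", namespace="default", body=job)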
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
data = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
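# --- Usage sketch (illustrative only). The /status subresource updates only
# the status stanza, so controllers can write status without racing spec
# updates. dry_run='All' (where the cluster supports it) validates the write
# without persisting anything:
#
#   job = batch.read_namespaced_job_status(name="pi", namespace="default")
#   job = batch.replace_namespaced_job_status(
#       name="pi", namespace="default", body=job, dry_run="All")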
|
read_namespaced_job_with_http_info
|
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user cannot specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
data = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
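# --- Usage sketch (illustrative only). Builds a minimal V1Job and POSTs it;
# the job name, image, and command are placeholder assumptions:
#
#   body = client.V1Job(
#       api_version="batch/v1", kind="Job",
#       metadata=client.V1ObjectMeta(name="pi"),
#       spec=client.V1JobSpec(
#           template=client.V1PodTemplateSpec(
#               spec=client.V1PodSpec(
#                   restart_policy="Never",
#                   containers=[client.V1Container(
#                       name="pi", image="perl",
#                       command=["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"])]))))
#   created = batch.create_namespaced_job(namespace="default", body=body)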
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and allows a client that is using limit to receive smaller chunks of a very large result to see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given resourceVersion.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
data = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and allows a client that is using limit to receive smaller chunks of a very large result to see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given resourceVersion.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
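# --- Usage sketch (illustrative only). Deletes every Job in a namespace that
# matches a label selector; the selector value is a placeholder assumption:
#
#   status = batch.delete_collection_namespaced_job(
#       namespace="default", label_selector="app=nightly-report")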
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
data = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.
:param bool orphan_dependents: Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
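# --- Usage sketch (illustrative only). Without a propagation policy the Job's
# Pods may be left behind; 'Background' or 'Foreground' cascades the delete.
# Names are placeholder assumptions:
#
#   status = batch.delete_namespaced_job(
#       name="pi", namespace="default",
#       body=client.V1DeleteOptions(propagation_policy="Background"),
#       grace_period_seconds=0)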
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
data = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
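# --- Usage sketch (illustrative only). Lists what the batch/v1 group serves;
# handy when probing an unfamiliar cluster:
#
#   for res in batch.get_api_resources().resources:
#       print(res.name, res.namespaced, res.kind)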
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and allows a client that is using limit to receive smaller chunks of a very large result to see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given resourceVersion.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
data = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and allows a client that is using limit to receive smaller chunks of a very large result to see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given resourceVersion.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
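# --- Usage sketch (illustrative only). Pages through all Jobs cluster-wide
# using limit plus the server-issued continue token (exposed by this client as
# metadata._continue, since `continue` is a Python keyword):
#
#   page = batch.list_job_for_all_namespaces(limit=50)
#   while True:
#       for job in page.items:
#           print(job.metadata.namespace, job.metadata.name)
#       token = page.metadata._continue
#       if not token:
#           break
#       page = batch.list_job_for_all_namespaces(limit=50, _continue=token)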
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and allows a client that is using limit to receive smaller chunks of a very large result to see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given resourceVersion.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
data = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and allows a client that is using limit to receive smaller chunks of a very large result to see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non-zero, then the result is at least as fresh as the given resourceVersion.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
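# Usage sketch (doctest-style, matching the examples above): list and page
# through Jobs in a namespace. The kubeconfig setup, the 'default' namespace
# and the 'app=demo' selector are placeholder assumptions. Continuation calls
# must repeat the original query parameters, passing the server token verbatim.
# >>> from kubernetes import client, config
# >>> config.load_kube_config()
# >>> batch_v1 = client.BatchV1Api()
# >>> page = batch_v1.list_namespaced_job('default', label_selector='app=demo', limit=50)
# >>> while True:
# ...     for job in page.items:
# ...         print(job.metadata.name)
# ...     if not page.metadata._continue:
# ...         break
# ...     page = batch_v1.list_namespaced_job('default', label_selector='app=demo',
# ...                                         limit=50, _continue=page.metadata._continue)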
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
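# Patch sketch (setup as in the list example above). The body is a plain
# JSON-style dict; which of the three patch content types listed above gets
# sent depends on the client version's header negotiation, so treat this as a
# strategic-merge-style sketch rather than canonical usage.
# >>> patch = {'spec': {'parallelism': 0}}
# >>> job = batch_v1.patch_namespaced_job('pi', 'default', patch)
# >>> job.spec.parallelism
# 0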
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
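# Status-subresource sketch: the .../status endpoint only persists changes
# under .status (the Job controller normally owns these fields), so spec
# edits sent through it have no effect. Names are placeholders.
# >>> status_patch = {'status': {'failed': 0}}
# >>> job = batch_v1.patch_namespaced_job_status('pi', 'default', status_patch)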
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
# MASKED: read_namespaced_job_with_http_info function (lines 1018-1106)
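# Read sketch (setup as in the list example above); 'pi' is a placeholder Job
# name. Per the parameter documentation above, export=True returns a copy
# stripped of fields a user cannot specify, suitable for re-submission.
# >>> job = batch_v1.read_namespaced_job('pi', 'default')
# >>> print(job.metadata.uid, job.spec.completions)
# >>> reusable = batch_v1.read_namespaced_job('pi', 'default', export=True)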
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
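# Polling sketch: re-read the status subresource until the Job reaches a
# terminal condition. conditions is None until the controller sets it.
# >>> import time
# >>> while True:
# ...     job = batch_v1.read_namespaced_job_status('pi', 'default')
# ...     done = any(c.type in ('Complete', 'Failed') and c.status == 'True'
# ...                for c in (job.status.conditions or []))
# ...     if done:
# ...         break
# ...     time.sleep(2)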
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
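# Replace sketch: a full read-modify-write. Keep metadata.resource_version
# from the read so the server can reject a conflicting writer with HTTP 409;
# activeDeadlineSeconds is one of the few Job spec fields that stays mutable.
# >>> job = batch_v1.read_namespaced_job('pi', 'default')
# >>> job.spec.active_deadline_seconds = 600
# >>> job = batch_v1.replace_namespaced_job('pi', 'default', job)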
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
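# The status variant of replace follows the same read-modify-write shape,
# but only the .status portion of the object is persisted:
# >>> job = batch_v1.read_namespaced_job_status('pi', 'default')
# >>> job.status.failed = 0
# >>> job = batch_v1.replace_namespaced_job_status('pi', 'default', job)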
|
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
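# Raw-response sketch: called directly (without the wrapper forcing
# _return_http_data_only), the *_with_http_info variants return the
# deserialized body together with the HTTP status code and headers.
# >>> data, status, headers = batch_v1.read_namespaced_job_with_http_info(
# ...     'pi', 'default')
# >>> status
# 200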
| 1018
| 1106
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
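# Create sketch built from the generated models, mirroring the classic 'pi'
# Job example; image and names are placeholders, setup as in the list sketch.
# >>> container = client.V1Container(
# ...     name='pi', image='perl',
# ...     command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(100)'])
# >>> template = client.V1PodTemplateSpec(
# ...     metadata=client.V1ObjectMeta(labels={'app': 'pi'}),
# ...     spec=client.V1PodSpec(restart_policy='Never', containers=[container]))
# >>> body = client.V1Job(
# ...     api_version='batch/v1', kind='Job',
# ...     metadata=client.V1ObjectMeta(name='pi'),
# ...     spec=client.V1JobSpec(template=template, backoff_limit=4))
# >>> job = batch_v1.create_namespaced_job('default', body)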
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
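# Bulk-delete sketch: remove every Job matching a selector. Note that this
# generated endpoint accepts no propagation policy, and the batch/v1 server
# default historically orphans the Jobs' Pods, so clean those up separately.
# >>> status = batch_v1.delete_collection_namespaced_job(
# ...     'default', label_selector='app=demo')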
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
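# Delete sketch: pass V1DeleteOptions so the Job's Pods are garbage-collected
# in the background instead of orphaned (the legacy batch/v1 default).
# >>> opts = client.V1DeleteOptions(propagation_policy='Background')
# >>> status = batch_v1.delete_namespaced_job('pi', 'default', body=opts)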
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
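# Discovery sketch: enumerate what the batch/v1 group serves (jobs and the
# jobs/status subresource), e.g. for capability probing before use.
# >>> for r in batch_v1.get_api_resources().resources:
# ...     print(r.name, r.namespaced, r.kind)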
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
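# Usage sketch for list_job_for_all_namespaces (assumes the `api` instance
# from the sketch above; the label selector and page size are illustrative):
#
#   jobs = api.list_job_for_all_namespaces(label_selector='app=batch-worker', limit=50)
#   for job in jobs.items:
#       print(job.metadata.namespace, job.metadata.name, job.status.succeeded)
#   # When more pages exist, jobs.metadata._continue holds the token to pass
#   # back via the `_continue` parameter.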
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
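# Usage sketch for list_namespaced_job in watch mode (assumes the `api`
# instance from above; namespace 'default' is illustrative). The
# kubernetes.watch helper drives this method with watch=True and yields
# decoded events:
#
#   from kubernetes import watch
#   w = watch.Watch()
#   for event in w.stream(api.list_namespaced_job, namespace='default',
#                         timeout_seconds=30):
#       print(event['type'], event['object'].metadata.name)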
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
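# Usage sketch for patch_namespaced_job (assumes the `api` instance from
# above; job name 'pi' and namespace 'default' are illustrative). A plain
# dict body is sent as a strategic merge patch:
#
#   patch = {'metadata': {'labels': {'owner': 'team-a'}}}
#   job = api.patch_namespaced_job(name='pi', namespace='default', body=patch)
#   print(job.metadata.labels)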
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
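# Usage sketch for patch_namespaced_job_status (same assumptions as above).
# The /status subresource is normally written by the Job controller, so this
# is mainly relevant to custom controllers; the body below is illustrative
# only:
#
#   status_patch = {'status': {'conditions': None}}
#   job = api.patch_namespaced_job_status(name='pi', namespace='default',
#                                         body=status_patch)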
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
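# Usage sketch for read_namespaced_job (same illustrative name and namespace
# as above):
#
#   job = api.read_namespaced_job(name='pi', namespace='default')
#   print(job.spec.completions, job.spec.parallelism, job.status.active)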
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
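# Usage sketch for read_namespaced_job_status, e.g. polling for completion
# (same assumptions as above; the poll interval and bound are arbitrary):
#
#   import time
#   for _ in range(60):
#       status = api.read_namespaced_job_status(name='pi',
#                                               namespace='default').status
#       if status.succeeded or status.failed:
#           break
#       time.sleep(5)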
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
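# Usage sketch for replace_namespaced_job using read-modify-write (same
# assumptions as above). A replace needs a full V1Job carrying the current
# resourceVersion, so the object from read_namespaced_job is edited and sent
# back; spec.parallelism is one of the few mutable Job spec fields:
#
#   job = api.read_namespaced_job(name='pi', namespace='default')
#   job.spec.parallelism = 2
#   job = api.replace_namespaced_job(name='pi', namespace='default', body=job)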
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
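# Usage sketch for replace_namespaced_job_status (same assumptions as above).
# As with the status patch, this is chiefly for controllers that own the
# Job's status; reading first keeps resourceVersion current:
#
#   job = api.read_namespaced_job_status(name='pi', namespace='default')
#   job.status.conditions = job.status.conditions or []
#   job = api.replace_namespaced_job_status(name='pi', namespace='default',
#                                           body=job)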