code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import logging
import os
from typing import List, Optional, Set, Tuple
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql # noqa
from sqlalchemy.orm import sessionmaker
from .constants import (
BUILTIN_SCHEMAS,
DEFAULT_SCHEMA,
DELETE,
LOGICAL_SLOT_PREFIX,
LOGICAL_SLOT_SUFFIX,
MATERIALIZED_VIEW,
PLUGIN,
TG_OP,
TRIGGER_FUNC,
UPDATE,
)
from .exc import (
LogicalSlotParseError,
ReplicationSlotError,
TableNotFoundError,
)
from .settings import (
PG_SSLMODE,
PG_SSLROOTCERT,
QUERY_CHUNK_SIZE,
STREAM_RESULTS,
)
from .trigger import CREATE_TRIGGER_TEMPLATE
from .urls import get_postgres_url
from .utils import compiled_query
from .view import create_view, DropView, is_view, RefreshView
try:
import citext # noqa
except ImportError:
pass
try:
import geoalchemy2 # noqa
except ImportError:
pass
logger = logging.getLogger(__name__)
class Payload(object):
    """A single change event decoded from a Postgres logical replication slot."""

    __slots__ = ("tg_op", "table", "schema", "old", "new", "xmin", "indices")

    def __init__(
        self,
        # BUGFIX: defaults were written `tg_op: str = Optional[None]` etc.,
        # which made the *typing object* Optional[None] the default value.
        tg_op: Optional[str] = None,
        table: Optional[str] = None,
        schema: Optional[str] = None,
        old: Optional[dict] = None,
        new: Optional[dict] = None,
        xmin: Optional[int] = None,
        indices: Optional[List[str]] = None,
    ):
        self.tg_op: Optional[str] = tg_op
        self.table: Optional[str] = table
        self.schema: Optional[str] = schema
        self.old: dict = old or {}
        self.new: dict = new or {}
        self.xmin: Optional[int] = xmin
        self.indices: Optional[List[str]] = indices

    @property
    def data(self) -> dict:
        """Extract the payload data from the payload.

        DELETE rows carry their values in ``old``; everything else in ``new``.
        """
        if self.tg_op == DELETE and self.old:
            return self.old
        return self.new

    def foreign_key_constraint(self, model) -> dict:
        """Map each referred table to its local/remote column pair and value.

        Returns e.g.:
        {
            'public.customer': {  # referred table with a fully qualified name
                'local': 'customer_id',
                'remote': 'id',
                'value': 1
            },
            'public.group': {
                'local': 'group_id',
                'remote': 'id',
                'value': 1
            }
        }
        """
        constraints: dict = {}
        for foreign_key in model.foreign_keys:
            referred_table: str = str(foreign_key.constraint.referred_table)
            constraints.setdefault(referred_table, {})
            if foreign_key.constraint.column_keys:
                # only record the constraint if the local column is present
                # in this payload's data
                if foreign_key.constraint.column_keys[0] in self.data:
                    constraints[referred_table] = {
                        "local": foreign_key.constraint.column_keys[0],
                        "remote": foreign_key.column.name,
                        "value": self.data[
                            foreign_key.constraint.column_keys[0]
                        ],
                    }
        return constraints
class TupleIdentifierType(sa.types.UserDefinedType):
    """Custom SQLAlchemy type for the Postgres system column type TID."""

    cache_ok: bool = True

    def get_col_spec(self, **kwargs) -> str:
        """Column DDL spec: the Postgres tuple-identifier type."""
        return "TID"

    def bind_processor(self, dialect):
        """Pass bound values through unmodified."""
        def identity(value):
            return value
        return identity

    def result_processor(self, dialect, coltype):
        """Pass result values through unmodified."""
        def identity(value):
            return value
        return identity
class Base(object):
    def __init__(
        self, database: str, verbose: bool = False, *args, **kwargs
    ) -> None:
        """Initialize the base class constructor."""
        # extra kwargs (user/host/password/port/sslmode/...) are forwarded
        # to the engine factory
        self.__engine: sa.engine.Engine = _pg_engine(
            database, echo=False, **kwargs
        )
        self.__schemas: Optional[dict] = None
        # models is a dict of f'{schema}.{table}'
        self.__models: dict = {}
        # the remaining dicts are lazy per-schema / per-(table, schema) caches
        self.__metadata: dict = {}
        self.__indices: dict = {}
        self.__views: dict = {}
        self.__materialized_views: dict = {}
        self.__tables: dict = {}
        self.__columns: dict = {}
        self.verbose: bool = verbose
        self._conn = None
def connect(self) -> None:
"""Connect to database."""
try:
conn = self.engine.connect()
conn.close()
except Exception as e:
logger.exception(f"Cannot connect to database: {e}")
raise
    def pg_settings(self, column: str) -> Optional[str]:
        """Return the value of server setting *column* from pg_settings.

        Returns None if the setting does not exist (fetchone returns None
        -> TypeError on subscript) or the row is empty (IndexError).
        """
        try:
            return self.fetchone(
                sa.select([sa.column("setting")])
                .select_from(sa.text("pg_settings"))
                .where(sa.column("name") == column),
                label="pg_settings",
            )[0]
        except (TypeError, IndexError):
            return None
def _can_create_replication_slot(self, slot_name: str) -> None:
"""Check if the given user can create and destroy replication slots."""
if self.replication_slots(slot_name):
logger.exception(f"Replication slot {slot_name} already exists")
self.drop_replication_slot(slot_name)
try:
self.create_replication_slot(slot_name)
except Exception as e:
logger.exception(f"{e}")
raise ReplicationSlotError(
f'PG_USER "{self.engine.url.username}" needs to be '
f"superuser or have permission to read, create and destroy "
f"replication slots to perform this action."
)
else:
self.drop_replication_slot(slot_name)
# Tables...
def models(self, table: str, schema: str) -> sa.sql.Alias:
"""Get an SQLAlchemy model representation from a table.
Args:
table (str): The tablename
schema (str): The database schema
Returns:
The SQLAlchemy aliased model representation
"""
name: str = f"{schema}.{table}"
if name not in self.__models:
if schema not in self.__metadata:
metadata = sa.MetaData(schema=schema)
metadata.reflect(self.engine, views=True)
self.__metadata[schema] = metadata
metadata = self.__metadata[schema]
if name not in metadata.tables:
raise TableNotFoundError(
f'Table "{name}" not found in registry'
)
model = metadata.tables[name]
model.append_column(sa.Column("xmin", sa.BigInteger))
model.append_column(sa.Column("ctid"), TupleIdentifierType)
# support SQLAlchemy/Postgres 14 which somehow now reflects
# the oid column
if "oid" not in [column.name for column in model.columns]:
model.append_column(
sa.Column("oid", sa.dialects.postgresql.OID)
)
model = model.alias()
setattr(
model,
"primary_keys",
sorted([primary_key.key for primary_key in model.primary_key]),
)
self.__models[f"{model.original}"] = model
return self.__models[name]
    @property
    def conn(self):
        """Return the cached connection handle (may be None)."""
        return self._conn
    @property
    def database(self) -> str:
        """str: Get the database name."""
        return self.engine.url.database
    @property
    def session(self) -> sessionmaker:
        """Return a new ORM session.

        NOTE(review): each access opens a fresh connection and returns a
        Session instance (the annotation says sessionmaker) — confirm
        callers close these sessions.
        """
        Session = sessionmaker(bind=self.engine.connect(), autoflush=True)
        return Session()
    @property
    def engine(self) -> sa.engine.Engine:
        """Get the database engine."""
        return self.__engine
@property
def schemas(self) -> dict:
"""Get the database schema names."""
if self.__schemas is None:
self.__schemas = sa.inspect(self.engine).get_schema_names()
for schema in BUILTIN_SCHEMAS:
if schema in self.__schemas:
self.__schemas.remove(schema)
return self.__schemas
def views(self, schema: str) -> list:
"""Get all materialized and non-materialized views."""
return self._views(schema) + self._materialized_views(schema)
def _views(self, schema: str) -> list:
"""Get all non-materialized views."""
if schema not in self.__views:
self.__views[schema] = []
for table in sa.inspect(self.engine).get_view_names(schema):
if is_view(self.engine, schema, table, materialized=False):
self.__views[schema].append(table)
return self.__views[schema]
def _materialized_views(self, schema: str) -> list:
"""Get all materialized views."""
if schema not in self.__materialized_views:
self.__materialized_views[schema] = []
for table in sa.inspect(self.engine).get_view_names(schema):
if is_view(self.engine, schema, table, materialized=True):
self.__materialized_views[schema].append(table)
return self.__materialized_views[schema]
def indices(self, table: str, schema: str) -> list:
"""Get the database table indexes."""
if (table, schema) not in self.__indices:
indexes = sa.inspect(self.engine).get_indexes(table, schema=schema)
self.__indices[(table, schema)] = sorted(
indexes, key=lambda d: d["name"]
)
return self.__indices[(table, schema)]
def tables(self, schema: str) -> list:
"""Get the table names for current schema."""
if schema not in self.__tables:
self.__tables[schema] = sorted(
sa.inspect(self.engine).get_table_names(schema)
)
return self.__tables[schema]
def columns(self, schema: str, table: str) -> list:
"""Get the column names for a table/view."""
if (table, schema) not in self.__columns:
columns = sa.inspect(self.engine).get_columns(table, schema=schema)
self.__columns[(table, schema)] = sorted(
[column["name"] for column in columns]
)
return self.__columns[(table, schema)]
    def truncate_table(self, table: str, schema: str = DEFAULT_SCHEMA) -> None:
        """Truncate a table.

        Note:
            we need to quote table names that can be reserved sql statements
            like user

        Args:
            table (str): The tablename
            schema (str): The database schema
        """
        logger.debug(f"Truncating table: {schema}.{table}")
        # CASCADE also truncates tables referencing this one via FKs
        self.execute(sa.DDL(f'TRUNCATE TABLE "{schema}"."{table}" CASCADE'))
def truncate_tables(
self, tables: List[str], schema: str = DEFAULT_SCHEMA
) -> None:
"""Truncate all tables."""
logger.debug(f"Truncating tables: {tables}")
for table in tables:
self.truncate_table(table, schema=schema)
def truncate_schema(self, schema: str) -> None:
"""Truncate all tables in a schema."""
logger.debug(f"Truncating schema: {schema}")
self.truncate_tables(self.tables(schema), schema=schema)
def truncate_schemas(self) -> None:
"""Truncate all tables in a database."""
for schema in self.schemas:
self.truncate_schema(schema)
# Replication slots...
def replication_slots(
self,
slot_name: str,
plugin: str = PLUGIN,
slot_type: str = "logical",
) -> List[str]:
"""List replication slots.
SELECT * FROM PG_REPLICATION_SLOTS
"""
return self.fetchall(
sa.select(["*"])
.select_from(sa.text("PG_REPLICATION_SLOTS"))
.where(
sa.and_(
*[
sa.column("slot_name") == slot_name,
sa.column("slot_type") == slot_type,
sa.column("plugin") == plugin,
]
)
),
label="replication_slots",
)
    def create_replication_slot(self, slot_name: str) -> None:
        """Create a replication slot.

        TODO:
        - Only create the replication slot if it does not exist
          otherwise warn that it already exists and return

        SELECT * FROM PG_REPLICATION_SLOTS
        """
        logger.debug(f"Creating replication slot: {slot_name}")
        # PLUGIN is the logical decoding output plugin to bind to the slot
        return self.fetchone(
            sa.select(["*"]).select_from(
                sa.func.PG_CREATE_LOGICAL_REPLICATION_SLOT(
                    slot_name,
                    PLUGIN,
                )
            ),
            label="create_replication_slot",
        )
    def drop_replication_slot(self, slot_name: str) -> None:
        """Drop a replication slot.

        No-op when the slot does not exist.
        """
        logger.debug(f"Dropping replication slot: {slot_name}")
        if self.replication_slots(slot_name):
            try:
                return self.fetchone(
                    sa.select(["*"]).select_from(
                        sa.func.PG_DROP_REPLICATION_SLOT(slot_name),
                    ),
                    label="drop_replication_slot",
                )
            except Exception as e:
                logger.exception(f"{e}")
                raise
    def _logical_slot_changes(
        self,
        slot_name: str,
        func: sa.sql.functions._FunctionGenerator,
        txmin: Optional[int] = None,
        txmax: Optional[int] = None,
        upto_lsn: Optional[int] = None,
        upto_nchanges: Optional[int] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> sa.sql.Select:
        """Build the SELECT over a slot-reading function (get/peek changes).

        *func* is one of PG_LOGICAL_SLOT_GET_CHANGES /
        PG_LOGICAL_SLOT_PEEK_CHANGES; txmin/txmax bound the transaction id
        window; limit/offset page the result.
        """
        filters: list = []
        statement: sa.sql.Select = sa.select(
            [sa.column("xid"), sa.column("data")]
        ).select_from(
            func(
                slot_name,
                upto_lsn,
                upto_nchanges,
            )
        )
        # xid is of type xid; cast via text to a comparable bigint
        if txmin is not None:
            filters.append(
                sa.cast(
                    sa.cast(sa.column("xid"), sa.Text),
                    sa.BigInteger,
                )
                >= txmin
            )
        if txmax is not None:
            filters.append(
                sa.cast(
                    sa.cast(sa.column("xid"), sa.Text),
                    sa.BigInteger,
                )
                < txmax
            )
        if filters:
            statement = statement.where(sa.and_(*filters))
        if limit is not None:
            statement = statement.limit(limit)
        if offset is not None:
            statement = statement.offset(offset)
        return statement
    def logical_slot_get_changes(
        self,
        slot_name: str,
        txmin: Optional[int] = None,
        txmax: Optional[int] = None,
        upto_lsn: Optional[int] = None,
        upto_nchanges: Optional[int] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> None:
        """Get/Consume changes from a logical replication slot.

        To get one change and data in existing replication slot:
        SELECT * FROM PG_LOGICAL_SLOT_GET_CHANGES('testdb', NULL, 1)

        To get ALL changes and data in existing replication slot:
        SELECT * FROM PG_LOGICAL_SLOT_GET_CHANGES('testdb', NULL, NULL)

        Note: this consumes (advances) the slot and discards the rows.
        """
        statement: sa.sql.Select = self._logical_slot_changes(
            slot_name,
            sa.func.PG_LOGICAL_SLOT_GET_CHANGES,
            txmin=txmin,
            txmax=txmax,
            upto_lsn=upto_lsn,
            upto_nchanges=upto_nchanges,
            limit=limit,
            offset=offset,
        )
        self.execute(statement, options=dict(stream_results=STREAM_RESULTS))
    def logical_slot_peek_changes(
        self,
        slot_name: str,
        txmin: Optional[int] = None,
        txmax: Optional[int] = None,
        upto_lsn: Optional[int] = None,
        upto_nchanges: Optional[int] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> List[sa.engine.row.Row]:
        """Peek a logical replication slot without consuming changes.

        SELECT * FROM PG_LOGICAL_SLOT_PEEK_CHANGES('testdb', NULL, 1)
        """
        statement: sa.sql.Select = self._logical_slot_changes(
            slot_name,
            sa.func.PG_LOGICAL_SLOT_PEEK_CHANGES,
            txmin=txmin,
            txmax=txmax,
            upto_lsn=upto_lsn,
            upto_nchanges=upto_nchanges,
            limit=limit,
            offset=offset,
        )
        return self.fetchall(statement)
    def logical_slot_count_changes(
        self,
        slot_name: str,
        txmin: Optional[int] = None,
        txmax: Optional[int] = None,
        upto_lsn: Optional[int] = None,
        upto_nchanges: Optional[int] = None,
    ) -> int:
        """Count pending changes in the slot without consuming them."""
        statement: sa.sql.Select = self._logical_slot_changes(
            slot_name,
            sa.func.PG_LOGICAL_SLOT_PEEK_CHANGES,
            txmin=txmin,
            txmax=txmax,
            upto_lsn=upto_lsn,
            upto_nchanges=upto_nchanges,
        )
        with self.engine.connect() as conn:
            # replace the select list with COUNT(*) and fetch the scalar
            return conn.execute(
                statement.with_only_columns([sa.func.COUNT()])
            ).scalar()
# Views...
    def create_view(
        self,
        index: str,
        schema: str,
        tables: Set,
        user_defined_fkey_tables: dict,
    ) -> None:
        """Create the pgsync materialized view; delegates to view.create_view."""
        create_view(
            self.engine,
            self.models,
            self.fetchall,
            index,
            schema,
            tables,
            user_defined_fkey_tables,
            self._materialized_views(schema),
        )
def drop_view(self, schema: str) -> None:
"""Drop a view."""
logger.debug(f"Dropping view: {schema}.{MATERIALIZED_VIEW}")
self.engine.execute(DropView(schema, MATERIALIZED_VIEW))
logger.debug(f"Dropped view: {schema}.{MATERIALIZED_VIEW}")
def refresh_view(
self, name: str, schema: str, concurrently: bool = False
) -> None:
"""Refresh a materialized view."""
logger.debug(f"Refreshing view: {schema}.{name}")
self.engine.execute(
RefreshView(schema, name, concurrently=concurrently)
)
logger.debug(f"Refreshed view: {schema}.{name}")
# Triggers...
    def create_triggers(
        self,
        schema: str,
        tables: Optional[List[str]] = None,
        join_queries: bool = False,
    ) -> None:
        """Create the pgsync notify/truncate triggers for tables in *schema*.

        Skips views and (when *tables* is given) tables not listed.
        With join_queries=True all DDL is issued as one joined statement.
        """
        queries: List[str] = []
        for table in self.tables(schema):
            if (tables and table not in tables) or (
                table in self.views(schema)
            ):
                continue
            logger.debug(f"Creating trigger on table: {schema}.{table}")
            for name, level, tg_op in [
                ("notify", "ROW", ["INSERT", "UPDATE", "DELETE"]),
                ("truncate", "STATEMENT", ["TRUNCATE"]),
            ]:
                # NOTE(review): drop_triggers runs once per (name, level)
                # pair, i.e. twice per table — presumably only once is
                # needed; confirm before changing.
                self.drop_triggers(schema, [table])
                queries.append(
                    f'CREATE TRIGGER "{table}_{name}" '
                    f'AFTER {" OR ".join(tg_op)} ON "{schema}"."{table}" '
                    f"FOR EACH {level} EXECUTE PROCEDURE "
                    f"{schema}.{TRIGGER_FUNC}()",
                )
        if join_queries:
            if queries:
                self.execute(sa.DDL("; ".join(queries)))
        else:
            for query in queries:
                self.execute(sa.DDL(query))
def drop_triggers(
self,
schema: str,
tables: Optional[List[str]] = None,
join_queries: bool = False,
) -> None:
"""Drop all pgsync defined triggers in database."""
queries: List[str] = []
for table in self.tables(schema):
if tables and table not in tables:
continue
logger.debug(f"Dropping trigger on table: {schema}.{table}")
for name in ("notify", "truncate"):
queries.append(
f'DROP TRIGGER IF EXISTS "{table}_{name}" ON '
f'"{schema}"."{table}"'
)
if join_queries:
if queries:
self.execute(sa.DDL("; ".join(queries)))
else:
for query in queries:
self.execute(sa.DDL(query))
    def create_function(self, schema: str) -> None:
        """Install the trigger function in *schema*.

        Rewrites the template so both the materialized view and the
        function name are schema-qualified.
        """
        self.execute(
            CREATE_TRIGGER_TEMPLATE.replace(
                MATERIALIZED_VIEW,
                f"{schema}.{MATERIALIZED_VIEW}",
            ).replace(
                TRIGGER_FUNC,
                f"{schema}.{TRIGGER_FUNC}",
            )
        )
def drop_function(self, schema: str) -> None:
self.execute(
sa.DDL(
f'DROP FUNCTION IF EXISTS "{schema}".{TRIGGER_FUNC}() CASCADE'
)
)
def disable_triggers(self, schema: str) -> None:
"""Disable all pgsync defined triggers in database."""
for table in self.tables(schema):
logger.debug(f"Disabling trigger on table: {schema}.{table}")
for name in ("notify", "truncate"):
self.execute(
sa.DDL(
f'ALTER TABLE "{schema}"."{table}" '
f"DISABLE TRIGGER {table}_{name}"
)
)
def enable_triggers(self, schema: str) -> None:
"""Enable all pgsync defined triggers in database."""
for table in self.tables(schema):
logger.debug(f"Enabling trigger on table: {schema}.{table}")
for name in ("notify", "truncate"):
self.execute(
sa.DDL(
f'ALTER TABLE "{schema}"."{table}" '
f"ENABLE TRIGGER {table}_{name}"
)
)
    @property
    def txid_current(self) -> int:
        """
        Get last committed transaction id from the database.

        SELECT txid_current()
        """
        return self.fetchone(
            sa.select(["*"]).select_from(sa.func.TXID_CURRENT()),
            label="txid_current",
        )[0]
def parse_value(self, type_: str, value: str) -> Optional[str]:
"""
Parse datatypes from db.
NB: All integers are long in python3 and call to convert is just int
"""
if value.lower() == "null":
return None
if type_.lower() in (
"bigint",
"bigserial",
"int",
"int2",
"int4",
"int8",
"integer",
"serial",
"serial2",
"serial4",
"serial8",
"smallint",
"smallserial",
):
try:
value = int(value)
except ValueError:
raise
if type_.lower() in (
"char",
"character",
"character varying",
"text",
"uuid",
"varchar",
):
value = value.lstrip("'").rstrip("'")
if type_.lower() == "boolean":
value = bool(value)
if type_.lower() in (
"double precision",
"float4",
"float8",
"real",
):
try:
value = float(value)
except ValueError:
raise
return value
def parse_logical_slot(self, row: str) -> Payload:
def _parse_logical_slot(data: str) -> Tuple[str, str]:
while True:
match = LOGICAL_SLOT_SUFFIX.search(data)
if not match:
break
key: str = match.groupdict().get("key")
if key:
key = key.replace('"', "")
value: str = match.groupdict().get("value")
type_: str = match.groupdict().get("type")
value = self.parse_value(type_, value)
# set data for next iteration of the loop
data = f"{data[match.span()[1]:]} "
yield key, value
match = LOGICAL_SLOT_PREFIX.search(row)
if not match:
raise LogicalSlotParseError(f"No match for row: {row}")
data = {"old": None, "new": None}
data.update(**match.groupdict())
payload: Payload = Payload(**data)
span = match.span()
# including trailing space below is deliberate
suffix: str = f"{row[span[1]:]} "
if "old-key" and "new-tuple" in suffix:
# this can only be an UPDATE operation
if payload.tg_op != UPDATE:
msg = f"Unknown {payload.tg_op} operation for row: {row}"
raise LogicalSlotParseError(msg)
i: int = suffix.index("old-key:")
if i > -1:
j: int = suffix.index("new-tuple:")
s: str = suffix[i + len("old-key:") : j]
for key, value in _parse_logical_slot(s):
payload.old[key] = value
i = suffix.index("new-tuple:")
if i > -1:
s = suffix[i + len("new-tuple:") :]
for key, value in _parse_logical_slot(s):
payload.new[key] = value
else:
# this can be an INSERT, DELETE, UPDATE or TRUNCATE operation
if payload.tg_op not in TG_OP:
raise LogicalSlotParseError(
f"Unknown {payload.tg_op} operation for row: {row}"
)
for key, value in _parse_logical_slot(suffix):
payload.new[key] = value
return payload
# Querying...
    def execute(
        self,
        statement: sa.sql.Select,
        values: Optional[list] = None,
        options: Optional[dict] = None,
    ) -> None:
        """Execute a query statement."""
        # delegates to the module-level helper (AUTOCOMMIT by default)
        pg_execute(self.engine, statement, values=values, options=options)
def fetchone(
self,
statement: sa.sql.Select,
label: Optional[str] = None,
literal_binds: bool = False,
) -> sa.engine.Row:
"""Fetch one row query."""
if self.verbose:
compiled_query(statement, label=label, literal_binds=literal_binds)
conn = self.engine.connect()
try:
row = conn.execute(statement).fetchone()
conn.close()
except Exception as e:
logger.exception(f"Exception {e}")
raise
return row
def fetchall(
self,
statement: sa.sql.Select,
label: Optional[str] = None,
literal_binds: bool = False,
) -> List[sa.engine.Row]:
"""Fetch all rows from a query statement."""
if self.verbose:
compiled_query(statement, label=label, literal_binds=literal_binds)
conn = self.engine.connect()
try:
rows = conn.execute(statement).fetchall()
conn.close()
except Exception as e:
logger.exception(f"Exception {e}")
raise
return rows
    def fetchmany(
        self,
        statement: sa.sql.Select,
        chunk_size: Optional[int] = None,
        stream_results: Optional[bool] = None,
    ):
        """Yield rows in chunks using server-side (streamed) results.

        Yields (keys, row, primary_keys) tuples; defaults come from
        QUERY_CHUNK_SIZE / STREAM_RESULTS settings.
        """
        chunk_size = chunk_size or QUERY_CHUNK_SIZE
        stream_results = stream_results or STREAM_RESULTS
        with self.engine.connect() as conn:
            result = conn.execution_options(
                stream_results=stream_results
            ).execute(statement.select())
            for partition in result.partitions(chunk_size):
                # each row is star-unpacked: first two columns, then the rest
                for keys, row, *primary_keys in partition:
                    yield keys, row, primary_keys
            result.close()
        self.engine.clear_compiled_cache()
    def fetchcount(self, statement: sa.sql.Subquery) -> int:
        """Return COUNT(*) for the given subquery's underlying select."""
        with self.engine.connect() as conn:
            # swap the select list for COUNT(*) and strip any ORDER BY
            return conn.execute(
                statement.original.with_only_columns(
                    [sa.func.COUNT()]
                ).order_by(None)
            ).scalar()
# helper methods
def subtransactions(session):
    """Context manager for executing code within a sub-transaction.

    Commits on clean exit; rolls back (and re-raises) when commit fails.
    """

    class _SubTransaction:
        def __init__(self, session):
            self.session = session

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, exc_tb):
            try:
                self.session.commit()
            except Exception:
                self.session.rollback()
                raise

    return _SubTransaction(session)
def pg_engine(
    database: str,
    user: Optional[str] = None,
    host: Optional[str] = None,
    password: Optional[str] = None,
    port: Optional[int] = None,
    echo: bool = False,
    sslmode: Optional[str] = None,
    sslrootcert: Optional[str] = None,
):
    """Context manager for managing engines.

    __enter__ builds and returns a sa.engine.Engine; __exit__ disposes it.
    """

    class ControlledExecution:
        def __init__(
            self,
            database: str,
            user: Optional[str] = None,
            host: Optional[str] = None,
            password: Optional[str] = None,
            port: Optional[int] = None,
            echo: bool = False,
            sslmode: Optional[str] = None,
            sslrootcert: Optional[str] = None,
        ):
            self.database = database
            self.user = user
            self.host = host
            self.password = password
            self.port = port
            self.echo = echo
            self.sslmode = sslmode
            self.sslrootcert = sslrootcert

        def __enter__(self) -> sa.engine.Engine:
            # use self.database — the original read the closed-over
            # `database` name, which only worked by accident
            self._engine = _pg_engine(
                self.database,
                user=self.user,
                host=self.host,
                password=self.password,
                port=self.port,
                echo=self.echo,
                sslmode=self.sslmode,
                sslrootcert=self.sslrootcert,
            )
            return self._engine

        def __exit__(self, type, value, traceback) -> None:
            self._engine.connect().close()
            self._engine.dispose()

    return ControlledExecution(
        database,
        user=user,
        host=host,
        # BUGFIX: this was `password=host`, silently sending the hostname
        # as the password
        password=password,
        port=port,
        echo=echo,
        sslmode=sslmode,
        sslrootcert=sslrootcert,
    )
def _pg_engine(
    database: str,
    user: Optional[str] = None,
    host: Optional[str] = None,
    password: Optional[str] = None,
    port: Optional[int] = None,
    echo: bool = False,
    sslmode: Optional[str] = None,
    sslrootcert: Optional[str] = None,
) -> sa.engine.Engine:
    """Build a SQLAlchemy engine for *database*.

    SSL options fall back to the PG_SSLMODE / PG_SSLROOTCERT settings;
    raises ValueError for an unknown sslmode and IOError when the CA
    certificate file does not exist.
    """
    connect_args: dict = {}
    sslmode = sslmode or PG_SSLMODE
    sslrootcert = sslrootcert or PG_SSLROOTCERT
    if sslmode:
        # validate against the modes accepted by libpq
        if sslmode not in (
            "allow",
            "disable",
            "prefer",
            "require",
            "verify-ca",
            "verify-full",
        ):
            raise ValueError(f'Invalid sslmode: "{sslmode}"')
        connect_args["sslmode"] = sslmode
    if sslrootcert:
        if not os.path.exists(sslrootcert):
            raise IOError(
                f'"{sslrootcert}" not found.\n'
                f"Provide a valid file containing SSL certificate "
                f"authority (CA) certificate(s)."
            )
        connect_args["sslrootcert"] = sslrootcert
    url: str = get_postgres_url(
        database,
        user=user,
        host=host,
        password=password,
        port=port,
    )
    return sa.create_engine(url, echo=echo, connect_args=connect_args)
def pg_execute(
    engine: sa.engine.Engine,
    statement: sa.sql.Select,
    values: Optional[list] = None,
    options: Optional[dict] = None,
) -> None:
    """Execute *statement* on a fresh connection (AUTOCOMMIT by default)."""
    options = options or {"isolation_level": "AUTOCOMMIT"}
    conn = engine.connect()
    try:
        if options:
            conn = conn.execution_options(**options)
        conn.execute(statement, values)
    except Exception as e:
        logger.exception(f"Exception {e}")
        raise
    finally:
        # BUGFIX: previously the connection leaked if execute raised;
        # always close it
        conn.close()
def create_schema(database: str, schema: str, echo: bool = False) -> None:
    """Create database schema."""
    logger.debug(f"Creating schema: {schema}")
    ddl = sa.DDL(f"CREATE SCHEMA IF NOT EXISTS {schema}")
    with pg_engine(database, echo=echo) as engine:
        pg_execute(engine, ddl)
    logger.debug(f"Created schema: {schema}")
def create_database(database: str, echo: bool = False) -> None:
    """Create a database."""
    logger.debug(f"Creating database: {database}")
    ddl = sa.DDL(f'CREATE DATABASE "{database}"')
    with pg_engine("postgres", echo=echo) as engine:
        pg_execute(engine, ddl)
    logger.debug(f"Created database: {database}")
def drop_database(database: str, echo: bool = False) -> None:
    """Drop a database."""
    logger.debug(f"Dropping database: {database}")
    ddl = sa.DDL(f'DROP DATABASE IF EXISTS "{database}"')
    with pg_engine("postgres", echo=echo) as engine:
        pg_execute(engine, ddl)
    logger.debug(f"Dropped database: {database}")
def database_exists(database: str, echo: bool = False) -> bool:
    """Check if database is present."""
    with pg_engine("postgres", echo=echo) as engine:
        conn = engine.connect()
        try:
            # SECURITY: use a bound parameter instead of interpolating the
            # database name into the SQL string
            row = conn.execute(
                sa.text("SELECT 1 FROM pg_database WHERE datname = :datname"),
                {"datname": database},
            ).first()
        except Exception as e:
            logger.exception(f"Exception {e}")
            raise
        finally:
            # BUGFIX: previously the connection leaked if execute raised
            conn.close()
        return row is not None
def create_extension(
    database: str, extension: str, echo: bool = False
) -> None:
    """Create a database extension."""
    logger.debug(f"Creating extension: {extension}")
    ddl = sa.DDL(f'CREATE EXTENSION IF NOT EXISTS "{extension}"')
    with pg_engine(database, echo=echo) as engine:
        pg_execute(engine, ddl)
    logger.debug(f"Created extension: {extension}")
def drop_extension(database: str, extension: str, echo: bool = False) -> None:
    """Drop a database extension."""
    logger.debug(f"Dropping extension: {extension}")
    ddl = sa.DDL(f'DROP EXTENSION IF EXISTS "{extension}"')
    with pg_engine(database, echo=echo) as engine:
        pg_execute(engine, ddl)
    logger.debug(f"Dropped extension: {extension}")
import logging
from typing import Any, Dict, Optional
from .constants import ( # noqa
CONCAT_TRANSFORM,
RENAME_TRANSFORM,
REPLACE_TRANSFORM,
)
logger = logging.getLogger(__name__)
class Transform(object):
"""Transform is really a builtin plugin"""
    @classmethod
    def rename(cls, data: dict, nodes: dict) -> dict:
        """Rename keys in a nested dictionary based on transform_node.

        Example node config:
        "rename": {
            "id": "publisher_id",
            "name": "publisher_name"
        },
        """
        return cls._rename(data, cls.get(nodes, RENAME_TRANSFORM))
    @classmethod
    def _rename(
        cls, data: dict, nodes: dict, result: Optional[dict] = None
    ) -> dict:
        """Recursive worker for rename.

        *nodes* maps old key -> new key (a str), or -> a nested mapping
        applied to dict/list values; keys absent from *nodes* are copied
        unchanged.
        """
        result = result or {}
        if isinstance(data, dict):
            for key, value in data.items():
                # a string mapping renames this key directly
                if isinstance(nodes.get(key), str):
                    key = nodes[key]
                elif isinstance(value, dict):
                    if key in nodes:
                        value = cls._rename(value, nodes[key])
                elif key in nodes.keys():
                    if isinstance(value, list):
                        # apply the nested mapping to each list element
                        value = [cls._rename(v, nodes[key]) for v in value]
                    elif isinstance(value, (str, int, float)):
                        if nodes[key]:
                            key = str(nodes[key])
                result[key] = value
        return result
    @classmethod
    def concat(cls, data: dict, nodes: dict) -> dict:
        """Concatenate column values into a new field

        {
            "columns": ["publisher_id", "publisher_name", "is_active", "foo"],
            "destination": "new_field",
            "delimiter": "-"
        },
        """
        return cls._concat(data, cls.get(nodes, CONCAT_TRANSFORM))
    @classmethod
    def _concat(
        cls, data: dict, nodes: dict, result: Optional[dict] = None
    ) -> dict:
        """Recursive worker for concat.

        {
            "columns": ["publisher_id", "publisher_name", "is_active", "foo"],
            "destination": "new_field",
            "delimiter": "-"
        },

        A list of such specs is applied one by one against the same data.
        """
        result = result or {}
        if isinstance(nodes, list):
            for node in nodes:
                cls._concat(data, node, result=result)
        if isinstance(data, dict):
            if "columns" in nodes:
                # missing columns fall back to the column *name* itself
                values: list = [data.get(key, key) for key in nodes["columns"]]
                delimiter: str = nodes.get("delimiter", "")
                destination: str = nodes["destination"]
                # falsy values are dropped before joining
                data[destination] = f"{delimiter}".join(
                    map(str, filter(None, values))
                )
            for key, value in data.items():
                if key in nodes:
                    if isinstance(value, dict):
                        value = cls._concat(value, nodes[key])
                    elif isinstance(value, list):
                        value = [
                            cls._concat(v, nodes[key])
                            for v in value
                            if key in nodes
                        ]
                result[key] = value
        return result
"""
@classmethod
def replace(cls, data: dict, nodes: dict) -> dict:
# TODO!
Replace field where value is
"replace": {
"code": {
"-": "="
}
}
return cls._replace(data, cls.get(nodes, REPLACE_TRANSFORM)))
@classmethod
def _replace(
cls, data: dict, nodes: dict, result: Optional[dict] = None
) -> dict:
# TODO!
Replace field where value is
"replace": {
"code": {
"-": "="
}
}
result_dict = result_dict or {}
if isinstance(data, dict):
if nodes:
for key, values in nodes.items():
if key not in data:
continue
if isinstance(data[key], list):
for k in values:
for search, replace in values[k].items():
data[key] = [
x.replace(search, replace)
for x in data[key]
]
else:
for search, replace in values.items():
data[key] = data[key].replace(search, replace)
for key, value in data.items():
if isinstance(value, dict):
value = cls._replace(value, nodes.get(key))
elif isinstance(value, list):
value = [
cls._replace(v, nodes[key])
for v in value
if key in nodes
]
result_dict[key] = value
return result_dict
"""
    @classmethod
    def transform(cls, data: dict, nodes: dict):
        """Apply all builtin transforms (rename, then concat) to *data*."""
        data = cls.rename(data, nodes)
        data = cls.concat(data, nodes)
        # data = cls.replace(data, nodes)
        return data
@classmethod
def get(cls, nodes: dict, type_: str) -> dict:
transform_node: dict = {}
if "transform" in nodes.keys():
if type_ in nodes["transform"]:
transform_node = nodes["transform"][type_]
for child in nodes.get("children", {}):
node: dict = cls.get(child, type_)
if node:
transform_node[child.get("label", child["table"])] = node
return transform_node
    @classmethod
    def get_primary_keys(cls, primary_keys: dict) -> dict:
        """Get private keys entry from a nested dict.

        Flattens a nested primary-key structure into
        {table: {column: sorted unique values}}.
        """
        def squash_list(values, _values=None):
            # flatten nested dicts/lists into a list of single-entry dicts
            if not _values:
                _values = []
            if isinstance(values, dict):
                if len(values) == 1:
                    _values.append(values)
                else:
                    for key, value in values.items():
                        _values.extend(squash_list({key: value}))
            elif isinstance(values, list):
                for value in values:
                    _values.extend(squash_list(value))
            return _values
        target = []
        # split any multi-entry dicts into single-entry dicts
        for values in squash_list(primary_keys):
            if len(values) > 1:
                for key, value in values.items():
                    target.append({key: value})
                continue
            target.append(values)
        target3 = []
        # normalise list values into {column: [values]} mappings
        for values in target:
            for key, value in values.items():
                if isinstance(value, dict):
                    target3.append({key: value})
                elif isinstance(value, list):
                    _value: Dict[Any, Any] = {}
                    for v in value:
                        for _k, _v in v.items():
                            _value.setdefault(_k, [])
                            if isinstance(_v, list):
                                _value[_k].extend(_v)
                            else:
                                _value[_k].append(_v)
                    target3.append({key: _value})
        target4 = {}
        # merge per-table entries, de-duplicate and sort the value lists
        for values in target3:
            for key, value in values.items():
                if key not in target4:
                    target4[key] = {}
                for k, v in value.items():
                    if k not in target4[key]:
                        target4[key][k] = []
                    if isinstance(v, list):
                        for _v in v:
                            if _v not in target4[key][k]:
                                target4[key][k].append(_v)
                    else:
                        if v not in target4[key][k]:
                            target4[key][k].append(v)
                    target4[key][k] = sorted(target4[key][k])
        return target4
class RelationshipTypeError(Exception):
    """Raised if the relationship type is none of
    "One to one", "One to many" or "Many to Many".
    """

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class RelationshipVariantError(Exception):
    """Raised if the relationship variant is not one of
    "Scalar" or "Object".
    """

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class RelationshipForeignKeyError(Exception):
    """Raised if the relationship foreign key is not one of
    "Child" or "Parent".
    """

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class RelationshipAttributeError(Exception):
    """Raised if the relationship attribute is not one of
    "type" or "variant".
    """

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class TableNotFoundError(Exception):
    """Raised when a table cannot be found in the model registry."""

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class TableNotInNodeError(Exception):
    """Raised when a table is missing from a node definition."""

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class InvalidSchemaError(Exception):
    """Raised when a schema definition is invalid."""

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class InvalidTGOPError(Exception):
    """Raised for an unrecognised trigger operation (TG_OP)."""

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class NodeAttributeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ColumnNotFoundError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ForeignKeyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RelationshipError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MultipleThroughTablesError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ReplicationSlotError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SchemaError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class FetchColumnForeignKeysError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PrimaryKeyNotFoundError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class LogicalSlotParseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RDSError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value) | /retake_pgsync-2.5.4-py3-none-any.whl/pgsync/exc.py | 0.840881 | 0.301381 | exc.py | pypi |
import psycopg2
from psycopg2.extras import LogicalReplicationConnection
from typing import List, Generator, cast
from core.extract.base import Extractor, ExtractorResult
class ConnectionError(Exception):
    """Raised when a connection to the database cannot be established.

    NOTE(review): this name shadows the builtin ``ConnectionError``;
    within this module the name refers to this class, not the builtin.
    """
    pass
class PostgresExtractor(Extractor):
    """Extractor that reads rows from Postgres via a logical replication connection."""

    def __init__(self, dsn: str) -> None:
        self.dsn = dsn
        self._connect(dsn)

    def _connect(self, dsn: str) -> None:
        """Open the connection and cursor for *dsn*, wrapping driver errors.

        Raises ConnectionError (this module's class) on failure.
        """
        try:
            # Fix: use the ``dsn`` parameter instead of silently ignoring it
            # in favor of ``self.dsn`` (same value today, but inconsistent).
            self.connection = psycopg2.connect(
                dsn, connection_factory=LogicalReplicationConnection
            )
        except (psycopg2.ProgrammingError, psycopg2.OperationalError) as exc:
            # The two previous handlers were identical; merge them and chain
            # the original cause for easier debugging.
            raise ConnectionError("Unable to connect to database") from exc
        self.cursor = self.connection.cursor()

    def teardown(self) -> None:
        """Close the cursor and the underlying connection."""
        self.cursor.close()  # type: ignore
        self.connection.close()

    def count(self, relation: str) -> int:
        """Return the number of rows in *relation*, or 0 if the query yields nothing.

        NOTE(review): *relation* is interpolated as a SQL identifier and must
        come from trusted configuration, not user input.
        """
        self.cursor.execute(f"SELECT COUNT(*) FROM {relation}")
        row = self.cursor.fetchone()
        return cast(int, row[0]) if row else 0

    def extract_all(
        self, relation: str, columns: List[str], primary_key: str, chunk_size: int
    ) -> Generator[ExtractorResult, None, None]:
        """Yield ``{"rows", "primary_keys"}`` chunks of up to *chunk_size* rows.

        Rows are paged with LIMIT/OFFSET ordered by *primary_key*; the
        primary key is selected last so it can be split off from each row.
        """
        offset = 0
        columns_str = ", ".join(columns)
        while True:
            self.cursor.execute(
                f"""
                SELECT {columns_str}, {primary_key}
                FROM {relation}
                ORDER BY {primary_key}
                LIMIT %s
                OFFSET %s
                """,
                (chunk_size, offset),
            )
            rows = self.cursor.fetchall()
            if not rows:
                break
            # Split the trailing primary-key column off each row.
            primary_keys = [row[-1] for row in rows]
            rows = [row[:-1] for row in rows]
            yield {"rows": rows, "primary_keys": primary_keys}
            offset += chunk_size
from elasticsearch import Elasticsearch, helpers
from typing import Dict, List, Union, Optional, Any, cast
from core.load.base import Loader
from core.sdk.target import ElasticSearchTarget
class FieldTypeError(Exception):
    """Raised when an existing index field conflicts with the expected
    dense_vector mapping (wrong field type or wrong number of dimensions)."""
    pass
class ElasticSearchLoader(Loader):
    """Loader that upserts embeddings into an Elasticsearch dense_vector field."""

    def __init__(
        self,
        host: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        ssl_assert_fingerprint: Optional[str] = None,
        cloud_id: Optional[str] = None,
        index: Optional[bool] = False,
        similarity: Optional[str] = None,
    ) -> None:
        """Connect via ``cloud_id`` (Elastic Cloud) or host credentials.

        :param index: whether created dense_vector fields are indexed.
        :param similarity: similarity metric; required when ``index`` is True.
        :raises ValueError: on inconsistent or missing connection settings.
        """
        if index and similarity is None:
            raise ValueError("Similarity must be provided if index is True")
        if cloud_id:
            self.es = Elasticsearch(cloud_id=cloud_id)
        elif host and user and password and ssl_assert_fingerprint:
            self.es = Elasticsearch(
                hosts=[host],
                basic_auth=(user, password),
                ssl_assert_fingerprint=ssl_assert_fingerprint,
                verify_certs=True,
            )
        elif host and user and password:
            # No fingerprint supplied: fall back to unverified TLS.
            self.es = Elasticsearch(
                hosts=[host],
                basic_auth=(user, password),
                verify_certs=False,
            )
        else:
            raise ValueError(
                "Either cloud_id or host, user, and password must be provided"
            )
        self.index = index
        self.similarity = similarity

    def _check_index_exists(self, index_name: str) -> bool:
        """Return True if *index_name* exists."""
        return cast(bool, self.es.indices.exists(index=index_name))

    def _vector_mapping(self, num_dimensions: int) -> Dict[str, Any]:
        """Build the dense_vector property mapping shared by both setup paths."""
        mapping: Dict[str, Any] = {
            "type": "dense_vector",
            "dims": num_dimensions,
            "index": self.index,
        }
        if self.similarity is not None:
            mapping["similarity"] = self.similarity
        return mapping

    def _create_index(
        self, index_name: str, field_name: str, num_dimensions: int
    ) -> None:
        """Create *index_name* with a dense_vector mapping for *field_name*."""
        self.es.indices.create(
            index=index_name,
            mappings={
                "dynamic": True,
                "_source": {"enabled": True},
                "properties": {field_name: self._vector_mapping(num_dimensions)},
            },
        )

    # Public Methods

    def check_and_setup_index(
        self, target: ElasticSearchTarget, num_dimensions: int
    ) -> None:
        """Ensure the target index/field exists and matches *num_dimensions*.

        :raises FieldTypeError: if the field exists with a different type
            or dimensionality.
        """
        index_name = target.index_name
        field_name = target.field_name
        if not self._check_index_exists(index_name=index_name):
            self._create_index(
                index_name=index_name,
                field_name=field_name,
                num_dimensions=num_dimensions,
            )
            return
        current_mapping = self.es.indices.get_mapping(index=index_name)
        properties = current_mapping[index_name]["mappings"]["properties"]
        if field_name in properties:
            # The field exists: verify it is a dense_vector with the right dims.
            field_mapping = properties[field_name]
            if field_mapping["type"] != "dense_vector":
                raise FieldTypeError(
                    f"Field '{field_name}' exists but is not a dense_vector field"
                )
            if field_mapping["dims"] != num_dimensions:
                raise FieldTypeError(
                    f"Field '{field_name}' expects {field_mapping['dims']} dimensions but the embedding has {num_dimensions}"
                )
        else:
            # The field does not exist, create it. Bug fix: honor the
            # configured ``index``/``similarity`` settings here instead of
            # hardcoding ``"index": True``, keeping both creation paths
            # consistent with ``_create_index``.
            self.es.indices.put_mapping(
                index=index_name,
                properties={field_name: self._vector_mapping(num_dimensions)},
            )

    @Loader.validate
    def bulk_upsert_embeddings(
        self,
        target: ElasticSearchTarget,
        embeddings: List[List[float]],
        ids: List[Union[str, int]],
        metadata: Optional[List[Dict[str, Any]]],
    ) -> None:
        """Upsert *embeddings* (plus optional per-document *metadata*) under *ids*."""
        index_name = target.index_name
        field_name = target.field_name
        if metadata is None:
            metadata = [{}] * len(embeddings)
        actions = [
            {
                "_op_type": "update",
                "_index": index_name,
                "_id": doc_id,
                "doc": {field_name: embedding, **meta},
                "doc_as_upsert": True,
            }
            for doc_id, embedding, meta in zip(ids, embeddings, metadata)
        ]
        helpers.bulk(self.es, actions)
import pinecone
from core.load.base import Loader
from typing import Dict, List, Union, Optional, Any
from core.sdk.target import PineconeTarget
class PineconeLoader(Loader):
    """Loader that upserts embeddings into a Pinecone index."""

    def __init__(
        self,
        api_key: str,
        environment: str,
    ) -> None:
        """Initialize the global pinecone client for *environment*."""
        pinecone.init(api_key=api_key, environment=environment)

    def _check_index_exists(self, index_name: str) -> bool:
        """Return True if *index_name* exists."""
        try:
            pinecone.describe_index(index_name)
            return True
        except pinecone.NotFoundException:
            return False

    def _get_num_dimensions(self, index_name: str) -> int:
        """Return the dimensionality of an existing index."""
        return int(pinecone.describe_index(index_name).dimension)

    def _create_index(self, index_name: str, num_dimensions: int) -> None:
        """Create *index_name* with *num_dimensions*-sized vectors."""
        pinecone.create_index(index_name, dimension=num_dimensions)

    # Public Methods

    def check_and_setup_index(
        self, target: PineconeTarget, num_dimensions: int
    ) -> None:
        """Create the index if missing; otherwise verify its dimensionality.

        :raises ValueError: if an existing index has a different dimension.
        """
        index_name = target.index_name
        if not self._check_index_exists(index_name=index_name):
            self._create_index(index_name=index_name, num_dimensions=num_dimensions)
        else:
            index_dimensions = self._get_num_dimensions(index_name=index_name)
            if index_dimensions != num_dimensions:
                raise ValueError(
                    f"Index {index_name} already exists with {index_dimensions} dimensions but embedding has {num_dimensions}"
                )

    @Loader.validate
    def bulk_upsert_embeddings(
        self,
        target: PineconeTarget,
        embeddings: List[List[float]],
        ids: List[Union[str, int]],
        metadata: Optional[List[Dict[str, Any]]],
    ) -> None:
        """Upsert *embeddings* under *ids* into the target index/namespace.

        :raises ValueError: on inconsistent embedding dimensions or
            mismatched list lengths.
        """
        index_name = target.index_name
        namespace = target.namespace
        num_dimensions = len(embeddings[0])
        num_embeddings = len(embeddings)
        if not all(len(embedding) == num_dimensions for embedding in embeddings):
            raise ValueError("Not all embeddings have the same number of dimensions")
        if not len(ids) == num_embeddings:
            raise ValueError("Number of ids does not match number of embeddings")
        if metadata is not None and len(metadata) != num_embeddings:
            # Bug fix: previously a short metadata list was silently truncated
            # by zip() below; fail fast on the mismatch instead.
            raise ValueError(
                "Number of metadata entries does not match number of embeddings"
            )
        if metadata is not None:
            docs = [
                {"id": doc_id, "values": embedding, "metadata": meta}
                for doc_id, embedding, meta in zip(ids, embeddings, metadata)
            ]
        else:
            docs = [
                {"id": doc_id, "values": embedding}
                for doc_id, embedding in zip(ids, embeddings)
            ]
        index = pinecone.Index(index_name)
        index.upsert(vectors=docs, namespace=namespace)
from opensearchpy import OpenSearch
from typing import List, Union, Optional, Dict, Any
from core.load.base import Loader
from core.sdk.target import OpenSearchTarget
class OpenSearchLoader(Loader):
    """Loader that bulk-upserts embedding documents into OpenSearch."""

    def __init__(
        self,
        hosts: List[Dict[str, str]],
        user: str,
        password: str,
        use_ssl: bool,
        cacerts: str,
    ) -> None:
        """Create an OpenSearch client with basic auth and optional TLS."""
        self.opensearch = OpenSearch(
            hosts=hosts,
            http_compress=True,  # gzip-compress request bodies
            http_auth=(user, password),
            use_ssl=use_ssl,
            verify_certs=use_ssl,
            ssl_assert_hostname=False,
            ssl_show_warn=False,
            ca_certs=cacerts,
        )

    def _check_index_exists(self, index_name: str) -> bool:
        """Return True if *index_name* already exists."""
        return self.opensearch.indices.exists(index_name)

    def _create_index(self, index_name: str) -> None:
        """Create *index_name* with the default shard layout."""
        self.opensearch.indices.create(
            index_name, body={"settings": {"index": {"number_of_shards": 4}}}
        )

    def check_and_setup_index(
        self, target: OpenSearchTarget, num_dimensions: int = 0
    ) -> None:
        """Create the target index if it does not exist yet."""
        if not self._check_index_exists(index_name=target.index_name):
            self._create_index(index_name=target.index_name)

    @Loader.validate
    def bulk_upsert_embeddings(
        self,
        target: OpenSearchTarget,
        embeddings: List[List[float]],
        ids: List[Union[str, int]],
        metadata: Optional[List[Dict[str, Any]]],
    ) -> None:
        """Upsert one document per id, storing each embedding under
        ``target.field_name`` alongside any per-document metadata."""
        index_name = target.index_name
        field_name = target.field_name
        # Empty metadata dicts make the no-metadata case identical in shape.
        metadata_rows = metadata if metadata is not None else [{} for _ in ids]
        body: List[Dict[str, Any]] = []
        for doc_id, embedding, meta in zip(ids, embeddings, metadata_rows):
            body.append({"update": {"_index": index_name, "_id": doc_id}})
            body.append(
                {"doc": {field_name: embedding, **meta}, "doc_as_upsert": True}
            )
        self.opensearch.bulk(body=body)
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams, PointStruct
from core.load.base import Loader
from typing import Dict, List, Union, Optional, Any, cast
from core.sdk.target import QdrantTarget, QdrantSimilarity
class QdrantLoader(Loader):
    """Loader that upserts embeddings into a Qdrant collection."""

    def __init__(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        similarity: Optional[QdrantSimilarity] = None,
    ) -> None:
        """Connect to Qdrant Cloud (url + api_key) or self-hosted (host + port).

        :raises ValueError: if neither credential pair is provided.
        """
        if url and api_key:
            self.client = QdrantClient(url=url, api_key=api_key)
        elif host and port:
            self.client = QdrantClient(host=host, port=port)
        else:
            raise ValueError(
                "Either url and api_key (for Qdrant Cloud) or host and port (for self-hosted Qdrant) must be provided"
            )
        self.similarity = similarity

    def _check_index_exists(self, index_name: str) -> bool:
        """Return True if a collection named *index_name* exists."""
        response = self.client.get_collections()
        return index_name in [collection.name for collection in response.collections]

    def _create_index(self, index_name: str, num_dimensions: int) -> None:
        """Create the collection with *num_dimensions*-sized vectors."""
        similarity = self.similarity.value if self.similarity else Distance.COSINE
        self.client.recreate_collection(
            collection_name=index_name,
            vectors_config=VectorParams(
                size=num_dimensions, distance=cast(Distance, similarity)
            ),
        )

    def check_and_setup_index(
        self, target: QdrantTarget, num_dimensions: int = 0
    ) -> None:
        """Create the target collection if missing.

        Bug fix: the collection is now created with the embedding's actual
        dimensionality instead of a hardcoded vector size of 100, which made
        upserts of differently-sized vectors fail.
        """
        if not self._check_index_exists(index_name=target.index_name):
            self._create_index(
                index_name=target.index_name, num_dimensions=num_dimensions
            )

    @Loader.validate
    def bulk_upsert_embeddings(
        self,
        target: QdrantTarget,
        embeddings: List[List[float]],
        ids: List[Union[str, int]],
        metadata: Optional[List[Dict[str, Any]]],
    ) -> None:
        """Upsert points with optional per-point payloads."""
        payloads = metadata if metadata else [{} for _ in range(len(ids))]
        points = [
            PointStruct(id=point_id, vector=vector, payload=payload)
            for point_id, vector, payload in zip(ids, embeddings, payloads)
        ]
        self.client.upsert(collection_name=target.index_name, points=points)
import os
import uuid
from weaviate import Client, AuthApiKey
from core.load.base import Loader
from typing import Dict, List, Union, Optional, Any, cast
from core.sdk.target import WeaviateTarget, WeaviateVectorizer
# Number of objects sent per Weaviate batch request.
DEFAULT_BATCH_SIZE = 100
# Namespace used to derive deterministic UUIDv5 object ids from source ids.
UUID_NAMESPACE = uuid.NAMESPACE_DNS
class WeaviateLoader(Loader):
    """Loader that batch-upserts embeddings into a Weaviate class."""

    def __init__(
        self,
        api_key: str,
        url: str,
        default_vectorizer: WeaviateVectorizer,
        default_vectorizer_config: Dict[str, str],
    ) -> None:
        """Authenticate against *url* and remember the vectorizer defaults."""
        self.wc = Client(
            url=url,
            auth_client_secret=AuthApiKey(api_key=api_key),
        )
        self.default_vectorizer = default_vectorizer
        self.default_vectorizer_config = default_vectorizer_config

    def _check_index_exists(self, index_name: str) -> bool:
        """Return True if a Weaviate class named *index_name* exists."""
        return cast(bool, self.wc.schema.exists(index_name))

    def _create_index(self, index_name: str) -> None:
        """Create the class with the configured default vectorizer."""
        vectorizer = str(self.default_vectorizer.value)
        self.wc.schema.create_class(
            {
                "class": index_name,
                "vectorizer": vectorizer,
                "moduleConfig": {vectorizer: self.default_vectorizer_config},
            }
        )

    def check_and_setup_index(
        self, target: WeaviateTarget, num_dimensions: int = 0
    ) -> None:
        """Create the target class if it does not exist yet."""
        if not self._check_index_exists(index_name=target.index_name):
            self._create_index(index_name=target.index_name)

    @Loader.validate
    def bulk_upsert_embeddings(
        self,
        target: WeaviateTarget,
        embeddings: List[List[float]],
        ids: List[Union[str, int]],
        metadata: Optional[List[Dict[str, Any]]],
    ) -> None:
        """Upsert objects keyed by a UUIDv5 derived from each source id.

        Objects with an empty embedding are stored without a vector —
        presumably so the configured vectorizer computes one server-side
        (confirm against the Weaviate class configuration).
        """
        payloads = metadata if metadata else [{} for _ in range(len(ids))]
        with self.wc.batch(
            batch_size=DEFAULT_BATCH_SIZE,
            num_workers=os.cpu_count(),
            dynamic=True,
        ):
            for embedding, source_id, payload in zip(embeddings, ids, payloads):
                kwargs: Dict[str, Any] = {
                    "class_name": target.index_name,
                    "data_object": payload,
                    "uuid": str(uuid.uuid5(UUID_NAMESPACE, str(source_id))),
                }
                # Only attach a vector when one was actually supplied.
                if embedding:
                    kwargs["vector"] = embedding
                self.wc.batch.add_data_object(**kwargs)
from pydantic import BaseModel
from enum import Enum
from typing import Optional, Dict, Any
class ElasticSimilarity(Enum):
    """Similarity metrics for Elasticsearch dense_vector fields."""

    L2_NORM = "l2_norm"
    DOT_PRODUCT = "dot_product"
    COSINE = "cosine"
class QdrantSimilarity(Enum):
    """Distance functions for Qdrant collections."""

    COSINE = "Cosine"
    EUCLID = "Euclid"
    DOT = "Dot"
class WeaviateVectorizer(Enum):
    """Weaviate text2vec vectorizer module identifiers."""

    COHERE = "text2vec-cohere"
    OPENAI = "text2vec-openai"
    PALM = "text2vec-palm"
    HUGGINGFACE = "text2vec-huggingface"
    TRANSFORMERS = "text2vec-transformers"
    CONTEXTIONARY = "text2vec-contextionary"
class ElasticSearchTarget(BaseModel):
    """Destination Elasticsearch index/field for embeddings."""

    index_name: str
    field_name: str
    # Whether the vector field should be indexed.
    should_index: bool
    similarity: Optional[ElasticSimilarity] = None
class OpenSearchTarget(BaseModel):
    """Destination OpenSearch index/field for embeddings."""

    index_name: str
    field_name: str
class PineconeTarget(BaseModel):
    """Destination Pinecone index and namespace for embeddings."""

    index_name: str
    namespace: str
class WeaviateTarget(BaseModel):
    """Destination Weaviate class plus its vectorizer settings."""

    index_name: str
    default_vectorizer: WeaviateVectorizer
    default_vectorizer_config: Dict[str, Any]
class QdrantTarget(BaseModel):
    """Destination Qdrant collection and optional distance metric."""

    index_name: str
    similarity: Optional[QdrantSimilarity] = None
class Target:
    """Factory helpers that build a target config for each supported sink."""

    @classmethod
    def ElasticSearch(
        cls,
        index_name: str,
        field_name: str,
        should_index: bool = True,
        similarity: Optional[ElasticSimilarity] = None,
    ) -> ElasticSearchTarget:
        """Describe an Elasticsearch index/field destination."""
        target = ElasticSearchTarget(
            index_name=index_name,
            field_name=field_name,
            should_index=should_index,
            similarity=similarity,
        )
        return target

    @classmethod
    def OpenSearch(cls, index_name: str, field_name: str) -> OpenSearchTarget:
        """Describe an OpenSearch index/field destination."""
        return OpenSearchTarget(index_name=index_name, field_name=field_name)

    @classmethod
    def Pinecone(cls, index_name: str, namespace: str) -> PineconeTarget:
        """Describe a Pinecone index/namespace destination."""
        return PineconeTarget(index_name=index_name, namespace=namespace)

    @classmethod
    def Weaviate(
        cls,
        index_name: str,
        default_vectorizer: WeaviateVectorizer,
        default_vectorizer_config: Dict[str, str],
    ) -> WeaviateTarget:
        """Describe a Weaviate class destination with its vectorizer config."""
        return WeaviateTarget(
            index_name=index_name,
            default_vectorizer=default_vectorizer,
            default_vectorizer_config=default_vectorizer_config,
        )

    @classmethod
    def Qdrant(
        cls,
        index_name: str,
        similarity: Optional[QdrantSimilarity] = None,
    ) -> QdrantTarget:
        """Describe a Qdrant collection destination."""
        return QdrantTarget(index_name=index_name, similarity=similarity)
from tqdm import tqdm
from typing import Union, Tuple, Any, Optional, Dict, List, cast
from core.sdk.embedding import (
OpenAIEmbedding,
SentenceTransformerEmbedding,
CohereEmbedding,
CustomEmbedding,
)
from core.sdk.source import PostgresSource
from core.sdk.sink import (
ElasticSearchSink,
OpenSearchSink,
PineconeSink,
WeaviateSink,
QdrantSink,
)
from core.sdk.target import (
ElasticSearchTarget,
OpenSearchTarget,
PineconeTarget,
WeaviateTarget,
QdrantTarget,
)
from core.sdk.realtime import RealtimeServer
from core.load.elasticsearch import ElasticSearchLoader
from core.load.opensearch import OpenSearchLoader
from core.load.pinecone import PineconeLoader
from core.load.weaviate import WeaviateLoader
from core.load.qdrant import QdrantLoader
from core.extract.postgres import PostgresExtractor
from core.transform.openai import OpenAIEmbedding as OpenAI
from core.transform.sentence_transformers import (
SentenceTransformerEmbedding as SentenceTransformer,
)
from core.transform.cohere import CohereEmbedding as Cohere
from core.transform.custom import CustomEmbedding as Custom
from streams.app import (
register_connector_conf,
wait_for_config_success,
register_agents,
start_worker,
)
from core.sdk.types import (
Source,
Transform,
Embedding,
Sink,
Target,
Extractor,
Loader,
Model,
)
# Number of rows processed per extract/embed/upsert batch.
BATCH_SIZE = 100
class Pipeline:
    """End-to-end embedding pipeline: extract rows, embed, upsert.

    Wires a ``Source`` to a ``Sink``/``Target`` pair through an embedding
    model, with an optional row-to-document ``Transform``.
    """

    def __init__(
        self,
        source: Source,
        sink: Sink,
        target: Target,
        embedding: Embedding,
        transform: Optional[Transform],
    ):
        self.source = source
        self.transform = transform
        self.embedding = embedding
        self.sink = sink
        self.target = target
        self.extractor = self._get_extractor()
        self.loader = self._get_loader()
        self.model = self._get_model()

    def _get_extractor(self) -> Extractor:
        """Return the extractor implementation for the configured source."""
        if isinstance(self.source, PostgresSource):
            return PostgresExtractor(self.source.dsn)
        else:
            raise ValueError("Invalid Source type")

    def _get_loader(self) -> Loader:
        """Return the loader for the sink, validating the sink/target pairing."""
        if isinstance(self.sink, ElasticSearchSink) and isinstance(
            self.target, ElasticSearchTarget
        ):
            # NOTE(review): target.should_index / target.similarity are not
            # forwarded here, so the loader falls back to its defaults
            # (index=False) -- confirm this is intended.
            return ElasticSearchLoader(
                host=self.sink.host,
                user=self.sink.user,
                password=self.sink.password,
                ssl_assert_fingerprint=self.sink.ssl_assert_fingerprint,
                cloud_id=self.sink.cloud_id,
            )
        elif isinstance(self.sink, OpenSearchSink) and isinstance(
            self.target, OpenSearchTarget
        ):
            return OpenSearchLoader(
                hosts=self.sink.hosts,
                user=self.sink.user,
                password=self.sink.password,
                use_ssl=self.sink.use_ssl,
                cacerts=self.sink.cacerts,
            )
        elif isinstance(self.sink, PineconeSink) and isinstance(
            self.target, PineconeTarget
        ):
            return PineconeLoader(
                api_key=self.sink.api_key,
                environment=self.sink.environment,
            )
        elif isinstance(self.sink, WeaviateSink) and isinstance(
            self.target, WeaviateTarget
        ):
            return WeaviateLoader(
                api_key=self.sink.api_key,
                url=self.sink.url,
                default_vectorizer=self.target.default_vectorizer,
                default_vectorizer_config=self.target.default_vectorizer_config,
            )
        elif isinstance(self.sink, QdrantSink) and isinstance(
            self.target, QdrantTarget
        ):
            return QdrantLoader(
                host=self.sink.host,
                port=self.sink.port,
                url=self.sink.url,
                api_key=self.sink.api_key,
                similarity=self.target.similarity,
            )
        else:
            raise ValueError("Target and Sink types do not match")

    def _get_model(self) -> Model:
        """Return the embedding model wrapper for the configured embedding."""
        if isinstance(self.embedding, OpenAIEmbedding):
            return OpenAI(api_key=self.embedding.api_key, model=self.embedding.model)
        elif isinstance(self.embedding, SentenceTransformerEmbedding):
            return SentenceTransformer(model=self.embedding.model)
        elif isinstance(self.embedding, CohereEmbedding):
            return Cohere(api_key=self.embedding.api_key, model=self.embedding.model)
        elif isinstance(self.embedding, CustomEmbedding):
            return Custom(func=self.embedding.func)
        else:
            raise ValueError("Invalid Embedding type")

    def _apply_transform(self, row: Tuple[str, ...]) -> str:
        """Turn one source row into the document string to embed."""
        if not self.transform:
            raise ValueError(
                "Transform expected but got None. Did you forget to provide a transform argument?"
            )
        return cast(str, self.transform.transform_func(*row))

    def _create_metadata(self, row: Tuple[str, ...]) -> Dict[str, Any]:
        """Build the optional metadata dict for one source row."""
        if not self.transform:
            raise ValueError(
                "Transform expected but got None. Did you forget to provide a transform argument?"
            )
        if not self.transform.optional_metadata:
            raise ValueError("_create_metadata called when optional_metadata is None")
        return cast(Dict[str, Any], self.transform.optional_metadata(*row))

    def pipe(
        self,
        ids: List[Union[str, int]],
        embeddings: Optional[List[List[float]]] = None,
        documents: Optional[List[str]] = None,
        metadata: Optional[List[Dict[str, Any]]] = None,
        verbose: bool = True,
    ) -> None:
        """Upsert the given data in batches of ``BATCH_SIZE``.

        Exactly one of *embeddings* (precomputed vectors) or *documents*
        (texts to embed first) must be provided; *metadata*, when given,
        is aligned index-by-index with *ids*.
        """
        if not embeddings and not documents:
            raise ValueError("Both embeddings and documents cannot be None")
        if embeddings and documents:
            raise ValueError("Both embeddings and documents cannot be provided")
        num_rows = len(ids)
        progress_bar = tqdm(
            total=num_rows,
            desc="Piping embeddings",
            disable=not verbose,
        )
        # Bug fix: step by offsets. The previous
        # ``range(num_rows // BATCH_SIZE + 1)`` loop emitted an extra empty
        # batch whenever num_rows was an exact multiple of BATCH_SIZE,
        # sending empty lists to the loader.
        for start in range(0, num_rows, BATCH_SIZE):
            end = start + BATCH_SIZE
            batch_ids = ids[start:end]
            batch_embeddings = embeddings[start:end] if embeddings else None
            batch_documents = documents[start:end] if documents else None
            batch_metadata = metadata[start:end] if metadata else None
            if batch_documents:
                batch_embeddings = self.model.create_embeddings(batch_documents)
            self.loader.bulk_upsert_embeddings(
                target=self.target,
                embeddings=cast(List[List[float]], batch_embeddings),
                ids=batch_ids,
                metadata=batch_metadata,
            )
            # Advance by the actual batch size so the bar ends at ``total``.
            progress_bar.update(len(batch_ids))
        progress_bar.close()

    def pipe_all(self, verbose: bool = True) -> None:
        """Extract every row of the transform's relation, embed and upsert."""
        if not self.transform:
            raise ValueError(
                "Transform expected but got None. Did you forget to provide a transform argument?"
            )
        total_rows = self.extractor.count(self.transform.relation)
        index_checked = False
        progress_bar = tqdm(
            total=total_rows,
            desc="Piping embeddings",
            disable=not verbose,
        )
        for chunk in self.extractor.extract_all(
            relation=self.transform.relation,
            columns=self.transform.columns,
            primary_key=self.transform.primary_key,
            chunk_size=BATCH_SIZE,
        ):
            rows = chunk.get("rows")
            primary_keys = chunk.get("primary_keys")
            if rows and primary_keys:
                # Build documents and (optionally) metadata for this chunk.
                documents = [self._apply_transform(row) for row in rows]
                metadata_list = (
                    [self._create_metadata(row) for row in rows]
                    if self.transform.optional_metadata
                    else None
                )
                embeddings = self.model.create_embeddings(documents)
                # Create/verify the index lazily, once the dimensionality
                # of the embeddings is known.
                if not index_checked:
                    self.loader.check_and_setup_index(
                        target=self.target,  # type: ignore
                        num_dimensions=len(embeddings[0]),
                    )
                    index_checked = True
                self.loader.bulk_upsert_embeddings(
                    target=self.target,
                    embeddings=embeddings,
                    ids=primary_keys,
                    metadata=metadata_list,
                )
                # Bug fix: advance by the real chunk size instead of a full
                # BATCH_SIZE so the bar does not overshoot on the last chunk.
                progress_bar.update(len(primary_keys))
        progress_bar.close()

    def create_real_time(self, server: RealtimeServer) -> None:
        """Register the CDC connector config on *server* and wait for success."""
        if self.transform is None:
            raise ValueError(
                "Transform expected but got None. Did you forget to provide a transform argument?"
            )
        index = self.target.index_name
        db_schema_name = self.transform.schema_name
        table_name = self.transform.relation
        register_connector_conf(
            server, index, db_schema_name, table_name, self.source, self.sink
        )
        wait_for_config_success(server)

    def pipe_real_time(self, server: RealtimeServer) -> None:
        """Start a worker that embeds and upserts change events from *server*."""
        if self.transform is None:
            raise ValueError(
                "Transform expected but got None. Did you forget to provide a transform argument?"
            )
        index = self.target.index_name
        db_schema_name = self.transform.schema_name
        table_name = self.transform.relation
        # NOTE(review): the first topic segment repeats ``table_name``; for a
        # ``server.schema.table`` style topic this looks suspicious -- confirm
        # against the connector configuration in register_connector_conf.
        topic = f"{table_name}.{db_schema_name}.{table_name}"
        worker = register_agents(
            topic,
            index,
            server,
            self.model.create_embeddings,
            self.transform.transform_func,
            self.transform.optional_metadata,
        )
        start_worker(worker)

    def teardown(self) -> None:
        """Release the extractor's database resources."""
        self.extractor.teardown()
from pydantic import BaseModel
from typing import Optional, List, Dict
class ElasticSearchSink(BaseModel):
    """Connection settings for an Elasticsearch sink.

    Either ``cloud_id`` (Elastic Cloud) or host-based credentials are used.
    """

    host: Optional[str] = None
    user: Optional[str] = None
    password: Optional[str] = None
    ssl_assert_fingerprint: Optional[str] = None
    cloud_id: Optional[str] = None

    @property
    def config(self) -> dict[str, Optional[str]]:
        """Connection parameters as a dict; ``cloud_id`` takes precedence."""
        if self.cloud_id is not None:
            return {"cloud_id": self.cloud_id}
        return {
            "host": self.host,
            "user": self.user,
            "password": self.password,
            "ssl_assert_fingerprint": self.ssl_assert_fingerprint,
        }
class OpenSearchSink(BaseModel):
    """Connection settings for an OpenSearch sink."""

    hosts: List[Dict[str, str]]
    user: str
    password: str
    use_ssl: bool
    cacerts: str

    @property
    def config(self) -> dict[str, Optional[str]]:
        # Unimplemented
        return {}
class PineconeSink(BaseModel):
    """Connection settings for a Pinecone sink."""

    api_key: str
    environment: str

    @property
    def config(self) -> dict[str, Optional[str]]:
        # Unimplemented
        return {}
class WeaviateSink(BaseModel):
    """Connection settings for a Weaviate sink."""

    api_key: str
    url: str

    @property
    def config(self) -> dict[str, Optional[str]]:
        # Unimplemented
        return {}
class QdrantSink(BaseModel):
    """Connection settings for a Qdrant sink (cloud url/api_key or host/port)."""

    host: Optional[str] = None
    port: Optional[int] = None
    url: Optional[str] = None
    api_key: Optional[str] = None

    @property
    def config(self) -> dict[str, Optional[str]]:
        # Unimplemented
        return {}
class Sink:
    """Factory helpers that build a connection config for each sink type."""

    @classmethod
    def ElasticSearch(
        cls,
        host: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        ssl_assert_fingerprint: Optional[str] = None,
        cloud_id: Optional[str] = None,
    ) -> ElasticSearchSink:
        """Build an ElasticSearchSink, dropping arguments left as None."""
        supplied = {
            "host": host,
            "user": user,
            "password": password,
            "ssl_assert_fingerprint": ssl_assert_fingerprint,
            "cloud_id": cloud_id,
        }
        return ElasticSearchSink(
            **{name: value for name, value in supplied.items() if value is not None}
        )

    @classmethod
    def OpenSearch(
        cls,
        hosts: List[Dict[str, str]],
        user: str,
        password: str,
        use_ssl: bool,
        cacerts: str,
    ) -> OpenSearchSink:
        """Build an OpenSearchSink."""
        return OpenSearchSink(
            hosts=hosts,
            user=user,
            password=password,
            use_ssl=use_ssl,
            cacerts=cacerts,
        )

    @classmethod
    def Pinecone(cls, api_key: str, environment: str) -> PineconeSink:
        """Build a PineconeSink."""
        return PineconeSink(api_key=api_key, environment=environment)

    @classmethod
    def Weaviate(cls, api_key: str, url: str) -> WeaviateSink:
        """Build a WeaviateSink."""
        return WeaviateSink(api_key=api_key, url=url)

    @classmethod
    def Qdrant(
        cls,
        host: Optional[str] = None,
        port: Optional[int] = None,
        url: Optional[str] = None,
        api_key: Optional[str] = None,
    ) -> QdrantSink:
        """Build a QdrantSink for cloud (url/api_key) or self-hosted (host/port)."""
        return QdrantSink(host=host, port=port, url=url, api_key=api_key)
import httpx
from opensearchpy import Search
from typing import Any, List, Dict, Optional, Union
class Database:
    """Connection parameters for a source Postgres database."""

    def __init__(self, host: str, user: str, password: str, port: int, dbname: str):
        self.host = host
        self.user = user
        self.password = password
        self.port = port
        self.dbname = dbname
class Table:
    """Describes a source table and how it maps into the sync schema."""

    def __init__(
        self,
        name: str,
        schema: str,
        columns: List[str],
        transform: Optional[Dict[str, Any]] = None,
        relationship: Optional[Dict[str, Any]] = None,
        children: Optional[List["Table"]] = None,
    ):
        self.name = name
        self.schema = schema
        self.columns = columns
        self.transform = transform
        self.relationship = relationship
        self.children = children

    def to_schema(self) -> Dict[str, Any]:
        """Serialize this table, and its children recursively, to a dict.

        Optional sections are emitted only when they are set and non-empty.
        """
        node: Dict[str, Any] = {
            "table": self.name,
            "schema": self.schema,
            "columns": self.columns,
        }
        for key, value in (
            ("transform", self.transform),
            ("relationship", self.relationship),
        ):
            if value:
                node[key] = value
        if self.children:
            node["children"] = [child.to_schema() for child in self.children]
        return node
class Index:
    """HTTP client for one named search index on the retake API."""

    def __init__(self, index_name: str, api_key: str, url: str) -> None:
        self.index_name = index_name
        self.api_key = api_key
        self.url = url
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    def _post(self, path: str, json: Dict[str, Any]) -> Any:
        """POST *json* to *path* and return the response.

        Shared helper replacing five copies of identical client construction
        and error handling. Raises ``Exception(response.text)`` on any
        non-200 reply.
        """
        with httpx.Client(timeout=None) as http:
            response = http.post(
                f"{self.url}{path}", headers=self.headers, json=json
            )
        if response.status_code != 200:
            raise Exception(response.text)
        return response

    def add_source(self, database: Database, table: Table) -> Any:
        """Register *table* in *database* as the data source for this index."""
        source = {
            "index_name": self.index_name,
            "source_host": database.host,
            "source_user": database.user,
            "source_password": database.password,
            "source_port": database.port,
            "source_dbname": database.dbname,
        }
        pgsync_schema: Dict[str, Any] = {
            "database": database.dbname,
            "index": self.index_name,
            "nodes": table.to_schema(),
        }
        print(
            f"Preparing to sync index {self.index_name} with table {table.name}. This may take some time if your table is large..."
        )
        self._post(
            "/index/add_source",
            {"source": source, "pgsync_schema": pgsync_schema},
        )

    def search(self, search: Search) -> Any:
        """Run the DSL *search* against this index and return the JSON result."""
        json = {
            "dsl": search.to_dict(),  # type: ignore
            "index_name": self.index_name,
        }
        return self._post("/index/search", json).json()

    def upsert(
        self, documents: List[Dict[str, Any]], ids: List[Union[str, int]]
    ) -> Any:
        """Upsert *documents* keyed by *ids* and return the server response."""
        json = {"index_name": self.index_name, "documents": documents, "ids": ids}
        return self._post("/index/upsert", json).json()

    def create_field(self, field_name: str, field_type: str) -> None:
        """Add a field of *field_type* to the index mapping."""
        self._post(
            "/index/field/create",
            {
                "index_name": self.index_name,
                "field_name": field_name,
                "field_type": field_type,
            },
        )

    def vectorize(self, field_names: List[str]) -> None:
        """Enable vectorization for *field_names* on this index."""
        self._post(
            "/index/vectorize",
            {"index_name": self.index_name, "field_names": field_names},
        )
import sys
import requests
PY3 = sys.version_info[0] == 3
if PY3:
from urllib.parse import parse_qs, urlencode, urlparse
def fetch_url(prepared_request):  # type: ignore
    """
    This is a util method that helps in reconstructing the request url.
    :param prepared_request: unsigned request
    :return: reconstructed url
    """
    parsed = urlparse(prepared_request.url)
    path = parsed.path or "/"

    # Rebuild the query string (if any), preserving blank values.
    querystring = ""
    if parsed.query:
        params = parse_qs(parsed.query, keep_blank_values=True)
        querystring = "?" + urlencode(params, doseq=True)

    # Prefer the Host header over the URL's netloc when present.
    lowered = {
        key.lower(): value for key, value in prepared_request.headers.items()
    }
    location = lowered.get("host") or parsed.netloc

    return parsed.scheme + "://" + location + path + querystring
class AWSV4SignerAuth(requests.auth.AuthBase):
    """
    AWS V4 Request Signer for Requests.
    """
    def __init__(self, credentials, region, service="es"):  # type: ignore
        """
        :param credentials: credentials object passed on to botocore's SigV4Auth
        :param region: AWS region to sign for
        :param service: signing service name, ``"es"`` by default
        """
        # Fail fast on missing configuration rather than emitting an
        # unsigned (and therefore rejected) request later.
        if not credentials:
            raise ValueError("Credentials cannot be empty")
        self.credentials = credentials
        if not region:
            raise ValueError("Region cannot be empty")
        self.region = region
        if not service:
            raise ValueError("Service name cannot be empty")
        self.service = service
    def __call__(self, request):  # type: ignore
        # requests invokes the auth object with the prepared request.
        return self._sign_request(request)  # type: ignore
    def _sign_request(self, prepared_request):  # type: ignore
        """
        This method helps in signing the request by injecting the required headers.
        :param prepared_request: unsigned request
        :return: signed request
        """
        # botocore is imported lazily so it is only required when signing.
        from botocore.auth import SigV4Auth
        from botocore.awsrequest import AWSRequest
        url = fetch_url(prepared_request)  # type: ignore
        # create an AWS request object and sign it using SigV4Auth
        aws_request = AWSRequest(
            method=prepared_request.method.upper(),
            url=url,
            data=prepared_request.body,
        )
        sig_v4_auth = SigV4Auth(self.credentials, self.service, self.region)
        sig_v4_auth.add_auth(aws_request)
        # copy the headers from AWS request object into the prepared_request
        prepared_request.headers.update(dict(aws_request.headers.items()))
        # NOTE(review): payload() is assumed to return the body hash used for
        # the X-Amz-Content-SHA256 header -- confirm against the installed
        # botocore version.
        prepared_request.headers["X-Amz-Content-SHA256"] = sig_v4_auth.payload(
            aws_request
        )
return prepared_request | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/helpers/signer.py | 0.477798 | 0.212722 | signer.py | pypi |
from datetime import datetime, timedelta
from six import iteritems, itervalues
from opensearchpy.helpers.aggs import A
from .query import MatchAll, Nested, Range, Terms
from .response import Response
from .search import Search
from .utils import AttrDict
__all__ = [
"FacetedSearch",
"HistogramFacet",
"TermsFacet",
"DateHistogramFacet",
"RangeFacet",
"NestedFacet",
]
class Facet(object):
    """
    A single facet of a faceted search.

    Wraps an aggregation, knows how to build a filter from the values a user
    has selected, and turns the raw aggregation response into a list of
    ``(value, count, selected)`` tuples.
    """

    agg_type = None

    def __init__(self, metric=None, metric_sort="desc", **kwargs):
        self.filter_values = ()
        self._params = kwargs
        self._metric = metric
        if metric and metric_sort:
            self._params["order"] = {"metric": metric_sort}

    def get_aggregation(self):
        """Build and return the aggregation object for this facet."""
        aggregation = A(self.agg_type, **self._params)
        if self._metric:
            aggregation.metric("metric", self._metric)
        return aggregation

    def add_filter(self, filter_values):
        """Combine the selected values into a single OR-ed filter."""
        if not filter_values:
            return
        combined = self.get_value_filter(filter_values[0])
        for value in filter_values[1:]:
            combined |= self.get_value_filter(value)
        return combined

    def get_value_filter(self, filter_value):
        """Build a filter for an individual value; subclasses override this."""
        pass

    def is_filtered(self, key, filter_values):
        """Report whether *key* is among the currently selected values."""
        return key in filter_values

    def get_value(self, bucket):
        """Return the value representing *bucket* (its key by default)."""
        return bucket["key"]

    def get_metric(self, bucket):
        """Return the sort metric for *bucket* (``doc_count`` by default)."""
        if self._metric:
            return bucket["metric"]["value"]
        return bucket["doc_count"]

    def get_values(self, data, filter_values):
        """Convert raw bucket data into (value, metric, selected) tuples."""
        values = []
        for bucket in data.buckets:
            value = self.get_value(bucket)
            values.append(
                (value, self.get_metric(bucket), self.is_filtered(value, filter_values))
            )
        return values
class TermsFacet(Facet):
    agg_type = "terms"

    def add_filter(self, filter_values):
        """Create a terms filter instead of bool containing term filters."""
        if not filter_values:
            return
        return Terms(
            _expand__to_dot=False, **{self._params["field"]: filter_values}
        )
class RangeFacet(Facet):
    agg_type = "range"

    def _range_to_dict(self, range):
        """Convert a ``(key, (from, to))`` pair into the agg's range dict."""
        key, bounds = range
        lower, upper = bounds
        spec = {"key": key}
        if lower is not None:
            spec["from"] = lower
        if upper is not None:
            spec["to"] = upper
        return spec

    def __init__(self, ranges, **kwargs):
        super(RangeFacet, self).__init__(**kwargs)
        self._params["ranges"] = [self._range_to_dict(r) for r in ranges]
        self._params["keyed"] = False
        # Remember the raw bounds so a selected key can be turned back into
        # a Range query.
        self._ranges = dict(ranges)

    def get_value_filter(self, filter_value):
        """Turn a selected range key back into a half-open ``Range`` query."""
        lower, upper = self._ranges[filter_value]
        limits = {}
        if lower is not None:
            limits["gte"] = lower
        if upper is not None:
            limits["lt"] = upper
        return Range(_expand__to_dot=False, **{self._params["field"]: limits})
class HistogramFacet(Facet):
    agg_type = "histogram"

    def get_value_filter(self, filter_value):
        """Match the half-open bucket [value, value + interval)."""
        interval = self._params["interval"]
        bucket_range = {"gte": filter_value, "lt": filter_value + interval}
        return Range(
            _expand__to_dot=False, **{self._params["field"]: bucket_range}
        )
def _date_interval_year(d):
return d.replace(
year=d.year + 1, day=(28 if d.month == 2 and d.day == 29 else d.day)
)
def _date_interval_month(d):
return (d + timedelta(days=32)).replace(day=1)
def _date_interval_week(d):
return d + timedelta(days=7)
def _date_interval_day(d):
return d + timedelta(days=1)
def _date_interval_hour(d):
return d + timedelta(hours=1)
class DateHistogramFacet(Facet):
    """Facet over a ``date_histogram`` aggregation.

    ``DATE_INTERVALS`` maps both calendar interval names (``"month"``) and
    their short forms (``"1M"``) onto helpers that compute a bucket's
    exclusive upper bound.
    """
    agg_type = "date_histogram"
    DATE_INTERVALS = {
        "year": _date_interval_year,
        "1Y": _date_interval_year,
        "month": _date_interval_month,
        "1M": _date_interval_month,
        "week": _date_interval_week,
        "1w": _date_interval_week,
        "day": _date_interval_day,
        "1d": _date_interval_day,
        "hour": _date_interval_hour,
        "1h": _date_interval_hour,
    }
    def __init__(self, **kwargs):
        # Include empty buckets by default so zero-count values still show up.
        kwargs.setdefault("min_doc_count", 0)
        super(DateHistogramFacet, self).__init__(**kwargs)
    def get_value(self, bucket):
        # Bucket keys arrive either as datetimes or as epoch milliseconds.
        if not isinstance(bucket["key"], datetime):
            # OpenSearch returns key=None instead of 0 for date 1970-01-01,
            # so we need to set key to 0 to avoid TypeError exception
            if bucket["key"] is None:
                bucket["key"] = 0
            # Preserve milliseconds in the datetime
            return datetime.utcfromtimestamp(int(bucket["key"]) / 1000.0)
        else:
            return bucket["key"]
    def get_value_filter(self, filter_value):
        """Create a filter spanning one bucket: [value, value + interval)."""
        # Prefer the newer calendar_interval/fixed_interval parameters and
        # fall back to the legacy "interval" parameter.
        for interval_type in ("calendar_interval", "fixed_interval"):
            if interval_type in self._params:
                break
        else:
            interval_type = "interval"
        return Range(
            _expand__to_dot=False,
            **{
                self._params["field"]: {
                    "gte": filter_value,
                    "lt": self.DATE_INTERVALS[self._params[interval_type]](
                        filter_value
                    ),
                }
            }
        )
class NestedFacet(Facet):
    agg_type = "nested"

    def __init__(self, path, nested_facet):
        """Wrap *nested_facet* so it runs inside a nested agg at *path*."""
        self._path = path
        self._inner = nested_facet
        super(NestedFacet, self).__init__(
            path=path, aggs={"inner": nested_facet.get_aggregation()}
        )

    def get_values(self, data, filter_values):
        # Delegate to the wrapped facet, reading from the inner agg's data.
        return self._inner.get_values(data.inner, filter_values)

    def add_filter(self, filter_values):
        inner_query = self._inner.add_filter(filter_values)
        if inner_query:
            return Nested(path=self._path, query=inner_query)
class FacetedResponse(Response):
    """Search response that exposes per-facet results via ``.facets``."""
    @property
    def query_string(self):
        # The original text query that produced this response.
        return self._faceted_search._query
    @property
    def facets(self):
        # Computed lazily on first access and cached in _facets.  The
        # attribute is stored via super(AttrDict, self).__setattr__ to start
        # the lookup above AttrDict in the MRO and bypass its __setattr__.
        if not hasattr(self, "_facets"):
            super(AttrDict, self).__setattr__("_facets", AttrDict({}))
            for name, facet in iteritems(self._faceted_search.facets):
                # Each facet's buckets live under the "_filter_<name>" agg
                # created by FacetedSearch.aggregate().
                self._facets[name] = facet.get_values(
                    getattr(getattr(self.aggregations, "_filter_" + name), name),
                    self._faceted_search.filter_values.get(name, ()),
                )
        return self._facets
class FacetedSearch(object):
"""
Abstraction for creating faceted navigation searches that takes care of
composing the queries, aggregations and filters as needed as well as
presenting the results in an easy-to-consume fashion::
class BlogSearch(FacetedSearch):
index = 'blogs'
doc_types = [Blog, Post]
fields = ['title^5', 'category', 'description', 'body']
facets = {
'type': TermsFacet(field='_type'),
'category': TermsFacet(field='category'),
'weekly_posts': DateHistogramFacet(field='published_from', interval='week')
}
def search(self):
' Override search to add your own filters '
s = super(BlogSearch, self).search()
return s.filter('term', published=True)
# when using:
blog_search = BlogSearch("web framework", filters={"category": "python"})
# supports pagination
blog_search[10:20]
response = blog_search.execute()
# easy access to aggregation results:
for category, hit_count, is_selected in response.facets.category:
print(
"Category %s has %d hits%s." % (
category,
hit_count,
' and is chosen' if is_selected else ''
)
)
"""
index = None
doc_types = None
fields = None
facets = {}
using = "default"
def __init__(self, query=None, filters={}, sort=()):
"""
:arg query: the text to search for
:arg filters: facet values to filter
:arg sort: sort information to be passed to :class:`~opensearchpy.Search`
"""
self._query = query
self._filters = {}
self._sort = sort
self.filter_values = {}
for name, value in iteritems(filters):
self.add_filter(name, value)
self._s = self.build_search()
def count(self):
return self._s.count()
def __getitem__(self, k):
self._s = self._s[k]
return self
def __iter__(self):
return iter(self._s)
def add_filter(self, name, filter_values):
"""
Add a filter for a facet.
"""
# normalize the value into a list
if not isinstance(filter_values, (tuple, list)):
if filter_values is None:
return
filter_values = [
filter_values,
]
# remember the filter values for use in FacetedResponse
self.filter_values[name] = filter_values
# get the filter from the facet
f = self.facets[name].add_filter(filter_values)
if f is None:
return
self._filters[name] = f
def search(self):
"""
Returns the base Search object to which the facets are added.
You can customize the query by overriding this method and returning a
modified search object.
"""
s = Search(doc_type=self.doc_types, index=self.index, using=self.using)
return s.response_class(FacetedResponse)
def query(self, search, query):
"""
Add query part to ``search``.
Override this if you wish to customize the query used.
"""
if query:
if self.fields:
return search.query("multi_match", fields=self.fields, query=query)
else:
return search.query("multi_match", query=query)
return search
    def aggregate(self, search):
        """
        Add aggregations representing the facets selected, including potential
        filters.
        """
        # Each facet is wrapped in a filter agg of all the OTHER facets'
        # filters, so its counts ignore its own selection but respect the rest.
        for f, facet in iteritems(self.facets):
            agg = facet.get_aggregation()
            agg_filter = MatchAll()
            for field, filter in iteritems(self._filters):
                if f == field:
                    continue
                agg_filter &= filter
            search.aggs.bucket("_filter_" + f, "filter", filter=agg_filter).bucket(
                f, agg
            )
    def filter(self, search):
        """
        Add a ``post_filter`` to the search request narrowing the results based
        on the facet filters.
        """
        if not self._filters:
            return search
        # AND together all active facet filters.
        post_filter = MatchAll()
        for f in itervalues(self._filters):
            post_filter &= f
        return search.post_filter(post_filter)
    def highlight(self, search):
        """
        Add highlighting for all the fields
        """
        # Strip any boost suffix ("title^5" -> "title") before highlighting.
        return search.highlight(
            *(f if "^" not in f else f.split("^", 1)[0] for f in self.fields)
        )
    def sort(self, search):
        """
        Add sorting information to the request.
        """
        if self._sort:
            search = search.sort(*self._sort)
        return search
    def build_search(self):
        """
        Construct the ``Search`` object.
        """
        s = self.search()
        s = self.query(s, self._query)
        s = self.filter(s)
        if self.fields:
            s = self.highlight(s)
        s = self.sort(s)
        self.aggregate(s)
        return s
def execute(self):
"""
Execute the search and return the response.
"""
r = self._s.execute()
r._faceted_search = self
return r | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/helpers/faceted_search.py | 0.925873 | 0.357343 | faceted_search.py | pypi |
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData
from .utils import DslBase
def A(name_or_agg, filter=None, **params):
    """Shortcut for creating an ``Agg`` from a name, raw dict, or instance."""
    if filter is not None:
        # Only the "filter" aggregation accepts a positional filter.
        if name_or_agg != "filter":
            raise ValueError(
                "Aggregation %r doesn't accept positional argument 'filter'."
                % name_or_agg
            )
        params["filter"] = filter

    # Case 1: raw dict such as {"terms": {"field": "tags"}, "aggs": {...}}
    if isinstance(name_or_agg, collections_abc.Mapping):
        if params:
            raise ValueError("A() cannot accept parameters when passing in a dict.")
        # Work on a copy so the caller's dict is left untouched.
        agg = name_or_agg.copy()
        aggs = agg.pop("aggs", None)  # nested aggregations, if any
        meta = agg.pop("meta", None)  # metadata, if any
        # Exactly one key may remain: the aggregation type.
        if len(agg) != 1:
            raise ValueError(
                'A() can only accept dict with an aggregation ({"terms": {...}}). '
                "Instead it got (%r)" % name_or_agg
            )
        agg_type, params = agg.popitem()
        if aggs:
            params = params.copy()
            params["aggs"] = aggs
        if meta:
            params = params.copy()
            params["meta"] = meta
        return Agg.get_dsl_class(agg_type)(_expand__to_dot=False, **params)

    # Case 2: already an Agg instance -- pass it straight through.
    if isinstance(name_or_agg, Agg):
        if params:
            raise ValueError(
                "A() cannot accept parameters when passing in an Agg object."
            )
        return name_or_agg

    # Case 3: an agg name such as "terms", plus keyword parameters.
    return Agg.get_dsl_class(name_or_agg)(**params)
class Agg(DslBase):
    _type_name = "agg"
    _type_shortcut = staticmethod(A)
    name = None

    def __contains__(self, key):
        # Plain (non-bucket) aggregations never hold sub-aggregations.
        return False

    def to_dict(self):
        serialized = super(Agg, self).to_dict()
        # "meta" belongs at the top level, not inside the agg body.
        if "meta" in serialized[self.name]:
            serialized["meta"] = serialized[self.name].pop("meta")
        return serialized

    def result(self, search, data):
        return AggResponse(self, search, data)
class AggBase(object):
    """Mixin providing sub-aggregation (``aggs``) handling and call chaining."""
    _param_defs = {
        "aggs": {"type": "agg", "hash": True},
    }
    def __contains__(self, key):
        # True when a sub-aggregation with this name is registered.
        return key in self._params.get("aggs", {})
    def __getitem__(self, agg_name):
        agg = self._params.setdefault("aggs", {})[agg_name]  # propagate KeyError
        # make sure we're not mutating a shared state - whenever accessing a
        # bucket, return a shallow copy of it to be safe
        if isinstance(agg, Bucket):
            agg = A(agg.name, **agg._params)
            # be sure to store the copy so any modifications to it will affect us
            self._params["aggs"][agg_name] = agg
        return agg
    def __setitem__(self, agg_name, agg):
        # Normalize whatever is assigned (name/dict/instance) through A().
        self.aggs[agg_name] = A(agg)
    def __iter__(self):
        return iter(self.aggs)
    def _agg(self, bucket, name, agg_type, *args, **params):
        """Register a sub-agg; return it (for buckets) or the chain base."""
        agg = self[name] = A(agg_type, *args, **params)
        # For chaining - when creating new buckets return them...
        if bucket:
            return agg
        # otherwise return self._base so we can keep chaining
        else:
            return self._base
    def metric(self, name, agg_type, *args, **params):
        # Metrics cannot nest further, so keep returning the base.
        return self._agg(False, name, agg_type, *args, **params)
    def bucket(self, name, agg_type, *args, **params):
        # Buckets can nest, so return the new bucket to chain into it.
        return self._agg(True, name, agg_type, *args, **params)
    def pipeline(self, name, agg_type, *args, **params):
        return self._agg(False, name, agg_type, *args, **params)
    def result(self, search, data):
        return BucketData(self, search, data)
class Bucket(AggBase, Agg):
    def __init__(self, **params):
        super(Bucket, self).__init__(**params)
        # Chained metric()/pipeline() calls bottom out at this bucket.
        self._base = self

    def to_dict(self):
        # Skip AggBase in the MRO so Agg's serialization runs, then hoist
        # "aggs" to the top level next to the aggregation body.
        serialized = super(AggBase, self).to_dict()
        if "aggs" in serialized[self.name]:
            serialized["aggs"] = serialized[self.name].pop("aggs")
        return serialized
class Filter(Bucket):
    name = "filter"
    _param_defs = {
        "filter": {"type": "query"},
        "aggs": {"type": "agg", "hash": True},
    }

    def __init__(self, filter=None, **params):
        # Allow the filter query to be given positionally.
        if filter is not None:
            params["filter"] = filter
        super(Filter, self).__init__(**params)

    def to_dict(self):
        serialized = super(Filter, self).to_dict()
        # Inline the filter body directly under the agg name.
        serialized[self.name].update(serialized[self.name].pop("filter", {}))
        return serialized
class Pipeline(Agg):
pass
# bucket aggregations
class Filters(Bucket):
name = "filters"
_param_defs = {
"filters": {"type": "query", "hash": True},
"aggs": {"type": "agg", "hash": True},
}
class Children(Bucket):
name = "children"
class Parent(Bucket):
name = "parent"
class DateHistogram(Bucket):
name = "date_histogram"
def result(self, search, data):
return FieldBucketData(self, search, data)
class AutoDateHistogram(DateHistogram):
name = "auto_date_histogram"
class DateRange(Bucket):
name = "date_range"
class GeoDistance(Bucket):
name = "geo_distance"
class GeohashGrid(Bucket):
name = "geohash_grid"
class GeotileGrid(Bucket):
name = "geotile_grid"
class GeoCentroid(Bucket):
name = "geo_centroid"
class Global(Bucket):
name = "global"
class Histogram(Bucket):
name = "histogram"
def result(self, search, data):
return FieldBucketData(self, search, data)
class IPRange(Bucket):
name = "ip_range"
class Missing(Bucket):
name = "missing"
class Nested(Bucket):
name = "nested"
class Range(Bucket):
name = "range"
class RareTerms(Bucket):
name = "rare_terms"
def result(self, search, data):
return FieldBucketData(self, search, data)
class ReverseNested(Bucket):
name = "reverse_nested"
class SignificantTerms(Bucket):
name = "significant_terms"
class SignificantText(Bucket):
name = "significant_text"
class Terms(Bucket):
name = "terms"
def result(self, search, data):
return FieldBucketData(self, search, data)
class Sampler(Bucket):
name = "sampler"
class DiversifiedSampler(Bucket):
name = "diversified_sampler"
class Composite(Bucket):
name = "composite"
_param_defs = {
"sources": {"type": "agg", "hash": True, "multi": True},
"aggs": {"type": "agg", "hash": True},
}
class VariableWidthHistogram(Bucket):
name = "variable_width_histogram"
def result(self, search, data):
return FieldBucketData(self, search, data)
# metric aggregations
class TopHits(Agg):
name = "top_hits"
def result(self, search, data):
return TopHitsData(self, search, data)
class Avg(Agg):
name = "avg"
class WeightedAvg(Agg):
name = "weighted_avg"
class Cardinality(Agg):
name = "cardinality"
class ExtendedStats(Agg):
name = "extended_stats"
class Boxplot(Agg):
name = "boxplot"
class GeoBounds(Agg):
name = "geo_bounds"
class Max(Agg):
name = "max"
class MedianAbsoluteDeviation(Agg):
name = "median_absolute_deviation"
class Min(Agg):
name = "min"
class Percentiles(Agg):
name = "percentiles"
class PercentileRanks(Agg):
name = "percentile_ranks"
class ScriptedMetric(Agg):
name = "scripted_metric"
class Stats(Agg):
name = "stats"
class Sum(Agg):
name = "sum"
class TTest(Agg):
name = "t_test"
class ValueCount(Agg):
name = "value_count"
# pipeline aggregations
class AvgBucket(Pipeline):
name = "avg_bucket"
class BucketScript(Pipeline):
name = "bucket_script"
class BucketSelector(Pipeline):
name = "bucket_selector"
class CumulativeSum(Pipeline):
name = "cumulative_sum"
class CumulativeCardinality(Pipeline):
name = "cumulative_cardinality"
class Derivative(Pipeline):
name = "derivative"
class ExtendedStatsBucket(Pipeline):
name = "extended_stats_bucket"
class Inference(Pipeline):
name = "inference"
class MaxBucket(Pipeline):
name = "max_bucket"
class MinBucket(Pipeline):
name = "min_bucket"
class MovingFn(Pipeline):
name = "moving_fn"
class MovingAvg(Pipeline):
name = "moving_avg"
class MovingPercentiles(Pipeline):
name = "moving_percentiles"
class Normalize(Pipeline):
name = "normalize"
class PercentilesBucket(Pipeline):
name = "percentiles_bucket"
class SerialDiff(Pipeline):
name = "serial_diff"
class StatsBucket(Pipeline):
name = "stats_bucket"
class SumBucket(Pipeline):
name = "sum_bucket"
class BucketSort(Pipeline):
name = "bucket_sort" | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/helpers/aggs.py | 0.674158 | 0.239444 | aggs.py | pypi |
import six
from opensearchpy.connection.connections import get_connection
from opensearchpy.helpers.utils import AttrDict, DslBase, merge
__all__ = ["tokenizer", "analyzer", "char_filter", "token_filter", "normalizer"]
class AnalysisBase(object):
@classmethod
def _type_shortcut(cls, name_or_instance, type=None, **kwargs):
if isinstance(name_or_instance, cls):
if type or kwargs:
raise ValueError("%s() cannot accept parameters." % cls.__name__)
return name_or_instance
if not (type or kwargs):
return cls.get_dsl_class("builtin")(name_or_instance)
return cls.get_dsl_class(type, "custom")(
name_or_instance, type or "custom", **kwargs
)
class CustomAnalysis(object):
    name = "custom"

    def __init__(self, filter_name, builtin_type="custom", **kwargs):
        self._builtin_type = builtin_type
        self._name = filter_name
        super(CustomAnalysis, self).__init__(**kwargs)

    def to_dict(self):
        # Inside analyzer definitions custom components appear by name only.
        return self._name

    def get_definition(self):
        """Return the full definition dict, including the builtin type."""
        definition = super(CustomAnalysis, self).to_dict()
        definition = definition.pop(self.name)
        definition["type"] = self._builtin_type
        return definition
class CustomAnalysisDefinition(CustomAnalysis):
    """Mixin for custom components that own nested filters/tokenizers."""
    def get_analysis_definition(self):
        """Collect this component's definition plus those of its parts."""
        out = {self._type_name: {self._name: self.get_definition()}}
        t = getattr(self, "tokenizer", None)
        # Only custom tokenizers carry get_definition; built-ins are
        # referenced by name alone and need no section of their own.
        if "tokenizer" in self._param_defs and hasattr(t, "get_definition"):
            out["tokenizer"] = {t._name: t.get_definition()}
        filters = {
            f._name: f.get_definition()
            for f in self.filter
            if hasattr(f, "get_definition")
        }
        if filters:
            out["filter"] = filters
        # any sub filter definitions like multiplexers etc?
        for f in self.filter:
            if hasattr(f, "get_analysis_definition"):
                d = f.get_analysis_definition()
                if d:
                    merge(out, d, True)
        char_filters = {
            f._name: f.get_definition()
            for f in self.char_filter
            if hasattr(f, "get_definition")
        }
        if char_filters:
            out["char_filter"] = char_filters
        return out
class BuiltinAnalysis(object):
    name = "builtin"

    def __init__(self, name):
        self._name = name
        super(BuiltinAnalysis, self).__init__()

    def to_dict(self):
        # Built-in analysis components are referenced by name alone.
        return self._name
class Analyzer(AnalysisBase, DslBase):
_type_name = "analyzer"
name = None
class BuiltinAnalyzer(BuiltinAnalysis, Analyzer):
def get_analysis_definition(self):
return {}
class CustomAnalyzer(CustomAnalysisDefinition, Analyzer):
    _param_defs = {
        "filter": {"type": "token_filter", "multi": True},
        "char_filter": {"type": "char_filter", "multi": True},
        "tokenizer": {"type": "tokenizer"},
    }
    def simulate(self, text, using="default", explain=False, attributes=None):
        """
        Use the Analyze API of opensearch to test the outcome of this analyzer.
        :arg text: Text to be analyzed
        :arg using: connection alias to use, defaults to ``'default'``
        :arg explain: will output all token attributes for each token. You can
            filter token attributes you want to output by setting ``attributes``
            option.
        :arg attributes: if ``explain`` is specified, filter the token
            attributes to return.
        """
        opensearch = get_connection(using)
        body = {"text": text, "explain": explain}
        if attributes:
            body["attributes"] = attributes
        definition = self.get_analysis_definition()
        analyzer_def = self.get_definition()
        # Inline each custom component's full definition into the request;
        # built-in components (no entry in `definition`) stay as bare names.
        for section in ("tokenizer", "char_filter", "filter"):
            if section not in analyzer_def:
                continue
            sec_def = definition.get(section, {})
            sec_names = analyzer_def[section]
            if isinstance(sec_names, six.string_types):
                # A single component referenced by name.
                body[section] = sec_def.get(sec_names, sec_names)
            else:
                # A list of components, each possibly custom.
                body[section] = [
                    sec_def.get(sec_name, sec_name) for sec_name in sec_names
                ]
        # Non-"custom" builtin types are passed through as the analyzer name.
        if self._builtin_type != "custom":
            body["analyzer"] = self._builtin_type
        return AttrDict(opensearch.indices.analyze(body=body))
class Normalizer(AnalysisBase, DslBase):
_type_name = "normalizer"
name = None
class BuiltinNormalizer(BuiltinAnalysis, Normalizer):
def get_analysis_definition(self):
return {}
class CustomNormalizer(CustomAnalysisDefinition, Normalizer):
_param_defs = {
"filter": {"type": "token_filter", "multi": True},
"char_filter": {"type": "char_filter", "multi": True},
}
class Tokenizer(AnalysisBase, DslBase):
_type_name = "tokenizer"
name = None
class BuiltinTokenizer(BuiltinAnalysis, Tokenizer):
pass
class CustomTokenizer(CustomAnalysis, Tokenizer):
pass
class TokenFilter(AnalysisBase, DslBase):
_type_name = "token_filter"
name = None
class BuiltinTokenFilter(BuiltinAnalysis, TokenFilter):
pass
class CustomTokenFilter(CustomAnalysis, TokenFilter):
pass
class MultiplexerTokenFilter(CustomTokenFilter):
name = "multiplexer"
def get_definition(self):
d = super(CustomTokenFilter, self).get_definition()
if "filters" in d:
d["filters"] = [
# comma delimited string given by user
fs if isinstance(fs, six.string_types) else
# list of strings or TokenFilter objects
", ".join(f.to_dict() if hasattr(f, "to_dict") else f for f in fs)
for fs in self.filters
]
return d
def get_analysis_definition(self):
if not hasattr(self, "filters"):
return {}
fs = {}
d = {"filter": fs}
for filters in self.filters:
if isinstance(filters, six.string_types):
continue
fs.update(
{
f._name: f.get_definition()
for f in filters
if hasattr(f, "get_definition")
}
)
return d
class ConditionalTokenFilter(CustomTokenFilter):
name = "condition"
def get_definition(self):
d = super(CustomTokenFilter, self).get_definition()
if "filter" in d:
d["filter"] = [
f.to_dict() if hasattr(f, "to_dict") else f for f in self.filter
]
return d
def get_analysis_definition(self):
if not hasattr(self, "filter"):
return {}
return {
"filter": {
f._name: f.get_definition()
for f in self.filter
if hasattr(f, "get_definition")
}
}
class CharFilter(AnalysisBase, DslBase):
_type_name = "char_filter"
name = None
class BuiltinCharFilter(BuiltinAnalysis, CharFilter):
pass
class CustomCharFilter(CustomAnalysis, CharFilter):
pass
# shortcuts for direct use
analyzer = Analyzer._type_shortcut
tokenizer = Tokenizer._type_shortcut
token_filter = TokenFilter._type_shortcut
char_filter = CharFilter._type_shortcut
normalizer = Normalizer._type_shortcut | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/helpers/analysis.py | 0.734786 | 0.16388 | analysis.py | pypi |
from opensearchpy.connection.connections import get_connection
from ..helpers.query import Bool, Q
from ..helpers.search import ProxyDescriptor, QueryProxy, Request
from .response import UpdateByQueryResponse
from .utils import recursive_to_dict
class UpdateByQuery(Request):
    # Proxy descriptor so ubq.query(...) both sets the query and chains.
    query = ProxyDescriptor("query")
    def __init__(self, **kwargs):
        """
        Update by query request to opensearch.
        :arg using: `OpenSearch` instance to use
        :arg index: limit the search to index
        :arg doc_type: only query this type.
        All the parameters supplied (or omitted) at creation type can be later
        overridden by methods (`using`, `index` and `doc_type` respectively).
        """
        super(UpdateByQuery, self).__init__(**kwargs)
        self._response_class = UpdateByQueryResponse
        # Single update script; script() overwrites rather than appends.
        self._script = {}
        self._query_proxy = QueryProxy(self, "query")
    def filter(self, *args, **kwargs):
        # Wrap the condition in a bool query's filter context.
        return self.query(Bool(filter=[Q(*args, **kwargs)]))
    def exclude(self, *args, **kwargs):
        # Same as filter(), but with the condition negated.
        return self.query(Bool(filter=[~Q(*args, **kwargs)]))
@classmethod
def from_dict(cls, d):
"""
Construct a new `UpdateByQuery` instance from a raw dict containing the search
body. Useful when migrating from raw dictionaries.
Example::
ubq = UpdateByQuery.from_dict({
"query": {
"bool": {
"must": [...]
}
},
"script": {...}
})
ubq = ubq.filter('term', published=True)
"""
u = cls()
u.update_from_dict(d)
return u
def _clone(self):
"""
Return a clone of the current search request. Performs a shallow copy
of all the underlying objects. Used internally by most state modifying
APIs.
"""
ubq = super(UpdateByQuery, self)._clone()
ubq._response_class = self._response_class
ubq._script = self._script.copy()
ubq.query._proxied = self.query._proxied
return ubq
def response_class(self, cls):
"""
Override the default wrapper used for the response.
"""
ubq = self._clone()
ubq._response_class = cls
return ubq
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if "query" in d:
self.query._proxied = Q(d.pop("query"))
if "script" in d:
self._script = d.pop("script")
self._extra.update(d)
return self
def script(self, **kwargs):
"""
Define update action to take:
Note: the API only accepts a single script, so
calling the script multiple times will overwrite.
Example::
ubq = Search()
ubq = ubq.script(source="ctx._source.likes++"")
ubq = ubq.script(source="ctx._source.likes += params.f"",
lang="expression",
params={'f': 3})
"""
ubq = self._clone()
if ubq._script:
ubq._script = {}
ubq._script.update(kwargs)
return ubq
def to_dict(self, **kwargs):
"""
Serialize the search into the dictionary that will be sent over as the
request'ubq body.
All additional keyword arguments will be included into the dictionary.
"""
d = {}
if self.query:
d["query"] = self.query.to_dict()
if self._script:
d["script"] = self._script
d.update(recursive_to_dict(self._extra))
d.update(recursive_to_dict(kwargs))
return d
def execute(self):
"""
Execute the search and return an instance of ``Response`` wrapping all
the data.
"""
opensearch = get_connection(self._using)
self._response = self._response_class(
self,
opensearch.update_by_query(
index=self._index, body=self.to_dict(), **self._params
),
)
return self._response | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/helpers/update_by_query.py | 0.88299 | 0.228737 | update_by_query.py | pypi |
from ..client.utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class IndexManagementClient(NamespacedClient):
    """Client for the Index State Management (ISM) plugin endpoints."""
    @query_params()
    def put_policy(self, policy, body=None, params=None, headers=None):
        """
        Creates, or updates, a policy.
        :arg policy: The name of the policy
        :arg body: The policy document
        """
        if policy in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'policy'.")
        return self.transport.perform_request(
            "PUT",
            _make_path("_plugins", "_ism", "policies", policy),
            params=params,
            headers=headers,
            body=body,
        )
@query_params()
def add_policy(self, index, body=None, params=None, headers=None):
"""
Adds a policy to an index. This operation does not change the policy if the index already has one.
:arg index: The name of the index to add policy on
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_ism", "add", index),
params=params,
headers=headers,
body=body,
)
@query_params()
def get_policy(self, policy, params=None, headers=None):
"""
Gets the policy by `policy_id`.
:arg policy: The name of the policy
"""
if policy in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'policy'.")
return self.transport.perform_request(
"GET",
_make_path("_plugins", "_ism", "policies", policy),
params=params,
headers=headers,
)
@query_params()
def remove_policy_from_index(self, index, params=None, headers=None):
"""
Removes any ISM policy from the index.
:arg index: The name of the index to remove policy on
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_ism", "remove", index),
params=params,
headers=headers,
)
@query_params()
def change_policy(self, index, body=None, params=None, headers=None):
"""
Updates the managed index policy to a new policy (or to a new version of the policy).
:arg index: The name of the index to change policy on
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_ism", "change_policy", index),
params=params,
headers=headers,
body=body,
)
@query_params()
def retry(self, index, body=None, params=None, headers=None):
"""
Retries the failed action for an index.
:arg index: The name of the index whose is in a failed state
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_ism", "retry", index),
params=params,
headers=headers,
body=body,
)
@query_params("show_policy")
def explain_index(self, index, params=None, headers=None):
"""
Gets the current state of the index.
:arg index: The name of the index to explain
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"GET",
_make_path("_plugins", "_ism", "explain", index),
params=params,
headers=headers,
)
@query_params()
def delete_policy(self, policy, params=None, headers=None):
"""
Deletes the policy by `policy_id`.
:arg policy: The name of the policy to delete
"""
if policy in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'policy'.")
return self.transport.perform_request(
"DELETE",
_make_path("_plugins", "_ism", "policies", policy),
params=params,
headers=headers,
) | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/plugins/index_management.py | 0.701509 | 0.257281 | index_management.py | pypi |
from ..client.utils import NamespacedClient, _make_path, query_params
class AlertingClient(NamespacedClient):
    @query_params()
    def search_monitor(self, body, params=None, headers=None):
        """
        Returns the search result for a monitor.
        :arg body: The search query used to match monitors
        """
        return self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_alerting", "monitors", "_search"),
            params=params,
            headers=headers,
            body=body,
        )
@query_params()
def get_monitor(self, monitor_id, params=None, headers=None):
"""
Returns the details of a specific monitor.
:arg monitor_id: The id of the monitor we are trying to fetch
"""
return self.transport.perform_request(
"GET",
_make_path("_plugins", "_alerting", "monitors", monitor_id),
params=params,
headers=headers,
)
@query_params("dryrun")
def run_monitor(self, monitor_id, params=None, headers=None):
"""
Runs/Executes a specific monitor.
:arg monitor_id: The id of the monitor we are trying to execute
:arg dryrun: Shows the results of a run without actions sending any message
"""
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_alerting", "monitors", monitor_id, "_execute"),
params=params,
headers=headers,
)
@query_params()
def create_monitor(self, body=None, params=None, headers=None):
"""
Creates a monitor with inputs, triggers, and actions.
:arg body: The configuration for the monitor (`inputs`, `triggers`, and `actions`)
"""
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_alerting", "monitors"),
params=params,
headers=headers,
body=body,
)
@query_params()
def update_monitor(self, monitor_id, body=None, params=None, headers=None):
"""
Updates a monitor's inputs, triggers, and actions.
:arg monitor_id: The id of the monitor we are trying to update
:arg body: The configuration for the monitor (`inputs`, `triggers`, and `actions`)
"""
return self.transport.perform_request(
"PUT",
_make_path("_plugins", "_alerting", "monitors", monitor_id),
params=params,
headers=headers,
body=body,
)
@query_params()
def delete_monitor(self, monitor_id, params=None, headers=None):
"""
Deletes a specific monitor.
:arg monitor_id: The id of the monitor we are trying to delete
"""
return self.transport.perform_request(
"DELETE",
_make_path("_plugins", "_alerting", "monitors", monitor_id),
params=params,
headers=headers,
)
@query_params()
def get_destination(self, destination_id=None, params=None, headers=None):
"""
Returns the details of a specific destination.
:arg destination_id: The id of the destination we are trying to fetch. If None, returns all destinations
"""
return self.transport.perform_request(
"GET",
_make_path("_plugins", "_alerting", "destinations", destination_id)
if destination_id
else _make_path("_plugins", "_alerting", "destinations"),
params=params,
headers=headers,
)
@query_params()
def create_destination(self, body=None, params=None, headers=None):
"""
Creates a destination for slack, mail, or custom-webhook.
:arg body: The configuration for the destination
"""
return self.transport.perform_request(
"POST",
_make_path("_plugins", "_alerting", "destinations"),
params=params,
headers=headers,
body=body,
)
@query_params()
def update_destination(self, destination_id, body=None, params=None, headers=None):
"""
Updates a destination's inputs, triggers, and actions.
:arg destination_id: The id of the destination we are trying to update
:arg body: The configuration for the destination
"""
return self.transport.perform_request(
"PUT",
_make_path("_plugins", "_alerting", "destinations", destination_id),
params=params,
headers=headers,
body=body,
)
@query_params()
def delete_destination(self, destination_id, params=None, headers=None):
"""
Deletes a specific destination.
:arg destination_id: The id of the destination we are trying to delete
"""
return self.transport.perform_request(
"DELETE",
_make_path("_plugins", "_alerting", "destinations", destination_id),
params=params,
headers=headers,
)
@query_params()
def get_alerts(self, params=None, headers=None):
"""
Returns all alerts.
"""
return self.transport.perform_request(
"GET",
_make_path("_plugins", "_alerting", "monitors", "alerts"),
params=params,
headers=headers,
)
@query_params()
def acknowledge_alert(self, monitor_id, body=None, params=None, headers=None):
"""
Acknowledges an alert.
:arg monitor_id: The id of the monitor, the alert belongs to
:arg body: The alerts to be acknowledged
"""
return self.transport.perform_request(
"POST",
_make_path(
"_plugins",
"_alerting",
"monitors",
monitor_id,
"_acknowledge",
"alerts",
),
params=params,
headers=headers,
body=body,
) | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/plugins/alerting.py | 0.765593 | 0.277908 | alerting.py | pypi |
import copy
from six import iteritems, string_types
from opensearchpy._async.helpers.actions import aiter, async_scan
from opensearchpy.connection.async_connections import get_connection
from opensearchpy.exceptions import IllegalOperation, TransportError
from opensearchpy.helpers.aggs import A
from opensearchpy.helpers.query import Bool, Q
from opensearchpy.helpers.response import Response
from opensearchpy.helpers.search import AggsProxy, ProxyDescriptor, QueryProxy, Request
from opensearchpy.helpers.utils import AttrDict, recursive_to_dict
class AsyncSearch(Request):
    # Descriptors exposing the per-instance ``QueryProxy`` objects
    # (created in ``__init__``) as attribute-style access points.
    query = ProxyDescriptor("query")
    post_filter = ProxyDescriptor("post_filter")
    def __init__(self, **kwargs):
        """
        Search request to opensearch.
        :arg using: `AsyncOpenSearch` instance to use
        :arg index: limit the search to index
        :arg doc_type: only query this type.
        All the parameters supplied (or omitted) at creation type can be later
        overridden by methods (`using`, `index` and `doc_type` respectively).
        """
        super(AsyncSearch, self).__init__(**kwargs)
        self.aggs = AggsProxy(self)
        self._sort = []
        self._source = None
        self._highlight = {}
        self._highlight_opts = {}
        self._suggest = {}
        self._script_fields = {}
        self._response_class = Response
        self._query_proxy = QueryProxy(self, "query")
        self._post_filter_proxy = QueryProxy(self, "post_filter")
    def filter(self, *args, **kwargs):
        """Add a query wrapped in a ``bool`` filter (non-scoring) context."""
        return self.query(Bool(filter=[Q(*args, **kwargs)]))
    def exclude(self, *args, **kwargs):
        """Add a negated query wrapped in a ``bool`` filter (non-scoring) context."""
        return self.query(Bool(filter=[~Q(*args, **kwargs)]))
    def __getitem__(self, n):
        """
        Support slicing the `AsyncSearch` instance for pagination.
        Slicing equates to the from/size parameters. E.g.::
            s = AsyncSearch().query(...)[0:25]
        is equivalent to::
            s = AsyncSearch().query(...).extra(from_=0, size=25)
        """
        s = self._clone()
        if isinstance(n, slice):
            # If negative slicing, abort.
            if n.start and n.start < 0 or n.stop and n.stop < 0:
                raise ValueError("AsyncSearch does not support negative slicing.")
            # OpenSearch won't get all results so we default to size: 10 if
            # stop not given.
            s._extra["from"] = n.start or 0
            s._extra["size"] = max(
                0, n.stop - (n.start or 0) if n.stop is not None else 10
            )
            return s
        else:  # This is an index lookup, equivalent to slicing by [n:n+1].
            # If negative index, abort.
            if n < 0:
                raise ValueError("AsyncSearch does not support negative indexing.")
            s._extra["from"] = n
            s._extra["size"] = 1
            return s
    @classmethod
    def from_dict(cls, d):
        """
        Construct a new `AsyncSearch` instance from a raw dict containing the search
        body. Useful when migrating from raw dictionaries.
        Example::
            s = AsyncSearch.from_dict({
                "query": {
                    "bool": {
                        "must": [...]
                    }
                },
                "aggs": {...}
            })
            s = s.filter('term', published=True)
        """
        s = cls()
        s.update_from_dict(d)
        return s
    def _clone(self):
        """
        Return a clone of the current search request. Performs a shallow copy
        of all the underlying objects. Used internally by most state modifying
        APIs.
        """
        s = super(AsyncSearch, self)._clone()
        s._response_class = self._response_class
        s._sort = self._sort[:]
        s._source = copy.copy(self._source) if self._source is not None else None
        s._highlight = self._highlight.copy()
        s._highlight_opts = self._highlight_opts.copy()
        s._suggest = self._suggest.copy()
        s._script_fields = self._script_fields.copy()
        # copy the proxied query objects so the clone is independent
        for x in ("query", "post_filter"):
            getattr(s, x)._proxied = getattr(self, x)._proxied
        # copy top-level bucket definitions
        if self.aggs._params.get("aggs"):
            s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
        return s
    def response_class(self, cls):
        """
        Override the default wrapper used for the response.
        """
        s = self._clone()
        s._response_class = cls
        return s
    def update_from_dict(self, d):
        """
        Apply options from a serialized body to the current instance. Modifies
        the object in-place. Used mostly by ``from_dict``.
        """
        d = d.copy()
        if "query" in d:
            self.query._proxied = Q(d.pop("query"))
        if "post_filter" in d:
            self.post_filter._proxied = Q(d.pop("post_filter"))
        # accept either the short or the long aggregation key
        aggs = d.pop("aggs", d.pop("aggregations", {}))
        if aggs:
            self.aggs._params = {
                "aggs": {name: A(value) for (name, value) in iteritems(aggs)}
            }
        if "sort" in d:
            self._sort = d.pop("sort")
        if "_source" in d:
            self._source = d.pop("_source")
        if "highlight" in d:
            high = d.pop("highlight").copy()
            self._highlight = high.pop("fields")
            self._highlight_opts = high
        if "suggest" in d:
            self._suggest = d.pop("suggest")
            # a global "text" is pushed down into every suggester that
            # doesn't define its own
            if "text" in self._suggest:
                text = self._suggest.pop("text")
                for s in self._suggest.values():
                    s.setdefault("text", text)
        if "script_fields" in d:
            self._script_fields = d.pop("script_fields")
        # anything left over goes verbatim into the request body
        self._extra.update(d)
        return self
    def script_fields(self, **kwargs):
        """
        Define script fields to be calculated on hits.
        Example::
            s = AsyncSearch()
            s = s.script_fields(times_two="doc['field'].value * 2")
            s = s.script_fields(
                times_three={
                    'script': {
                        'lang': 'painless',
                        'source': "doc['field'].value * params.n",
                        'params': {'n': 3}
                    }
                }
            )
        """
        s = self._clone()
        # a plain string is shorthand for {"script": "..."}
        for name in kwargs:
            if isinstance(kwargs[name], string_types):
                kwargs[name] = {"script": kwargs[name]}
        s._script_fields.update(kwargs)
        return s
    def source(self, fields=None, **kwargs):
        """
        Selectively control how the _source field is returned.
        :arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
        If ``fields`` is None, the entire document will be returned for
        each hit.  If fields is a dictionary with keys of 'includes' and/or
        'excludes' the fields will be either included or excluded appropriately.
        Calling this multiple times with the same named parameter will override the
        previous values with the new ones.
        Example::
            s = AsyncSearch()
            s = s.source(includes=['obj1.*'], excludes=["*.description"])
            s = AsyncSearch()
            s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
        """
        s = self._clone()
        if fields and kwargs:
            raise ValueError("You cannot specify fields and kwargs at the same time.")
        if fields is not None:
            s._source = fields
            return s
        if kwargs and not isinstance(s._source, dict):
            s._source = {}
        # a kwarg set to None removes that key from the _source spec
        for key, value in kwargs.items():
            if value is None:
                try:
                    del s._source[key]
                except KeyError:
                    pass
            else:
                s._source[key] = value
        return s
    def sort(self, *keys):
        """
        Add sorting information to the search request. If called without
        arguments it will remove all sort requirements. Otherwise it will
        replace them. Acceptable arguments are::
            'some.field'
            '-some.other.field'
            {'different.field': {'any': 'dict'}}
        so for example::
            s = AsyncSearch().sort(
                'category',
                '-title',
                {"price" : {"order" : "asc", "mode" : "avg"}}
            )
        will sort by ``category``, ``title`` (in descending order) and
        ``price`` in ascending order using the ``avg`` mode.
        The API returns a copy of the AsyncSearch object and can thus be chained.
        """
        s = self._clone()
        s._sort = []
        for k in keys:
            # a leading "-" on a string key means descending order
            if isinstance(k, string_types) and k.startswith("-"):
                if k[1:] == "_score":
                    raise IllegalOperation("Sorting by `-_score` is not allowed.")
                k = {k[1:]: {"order": "desc"}}
            s._sort.append(k)
        return s
    def highlight_options(self, **kwargs):
        """
        Update the global highlighting options used for this request. For
        example::
            s = AsyncSearch()
            s = s.highlight_options(order='score')
        """
        s = self._clone()
        s._highlight_opts.update(kwargs)
        return s
    def highlight(self, *fields, **kwargs):
        """
        Request highlighting of some fields. All keyword arguments passed in will be
        used as parameters for all the fields in the ``fields`` parameter. Example::
            AsyncSearch().highlight('title', 'body', fragment_size=50)
        will produce the equivalent of::
            {
                "highlight": {
                    "fields": {
                        "body": {"fragment_size": 50},
                        "title": {"fragment_size": 50}
                    }
                }
            }
        If you want to have different options for different fields
        you can call ``highlight`` twice::
            AsyncSearch().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
        which will produce::
            {
                "highlight": {
                    "fields": {
                        "body": {"fragment_size": 100},
                        "title": {"fragment_size": 50}
                    }
                }
            }
        """
        s = self._clone()
        for f in fields:
            s._highlight[f] = kwargs
        return s
    def suggest(self, name, text, **kwargs):
        """
        Add a suggestions request to the search.
        :arg name: name of the suggestion
        :arg text: text to suggest on
        All keyword arguments will be added to the suggestions body. For example::
            s = AsyncSearch()
            s = s.suggest('suggestion-1', 'AsyncOpenSearch', term={'field': 'body'})
        """
        s = self._clone()
        s._suggest[name] = {"text": text}
        s._suggest[name].update(kwargs)
        return s
    def to_dict(self, count=False, **kwargs):
        """
        Serialize the search into the dictionary that will be sent over as the
        request's body.
        :arg count: a flag to specify if we are interested in a body for count -
            no aggregations, no pagination bounds etc.
        All additional keyword arguments will be included into the dictionary.
        """
        d = {}
        if self.query:
            d["query"] = self.query.to_dict()
        # count request doesn't care for sorting and other things
        if not count:
            if self.post_filter:
                d["post_filter"] = self.post_filter.to_dict()
            if self.aggs.aggs:
                d.update(self.aggs.to_dict())
            if self._sort:
                d["sort"] = self._sort
            d.update(recursive_to_dict(self._extra))
            if self._source not in (None, {}):
                d["_source"] = self._source
            if self._highlight:
                d["highlight"] = {"fields": self._highlight}
                d["highlight"].update(self._highlight_opts)
            if self._suggest:
                d["suggest"] = self._suggest
            if self._script_fields:
                d["script_fields"] = self._script_fields
        d.update(recursive_to_dict(kwargs))
        return d
    async def count(self):
        """
        Return the number of hits matching the query and filters. Note that
        only the actual number is returned.
        """
        # reuse an already-executed response when its total is exact ("eq")
        if hasattr(self, "_response") and self._response.hits.total.relation == "eq":
            return self._response.hits.total.value
        opensearch = await get_connection(self._using)
        d = self.to_dict(count=True)
        # TODO: failed shards detection
        return (await opensearch.count(index=self._index, body=d, **self._params))[
            "count"
        ]
    async def execute(self, ignore_cache=False):
        """
        Execute the search and return an instance of ``Response`` wrapping all
        the data.
        :arg ignore_cache: if set to ``True``, consecutive calls will hit
            AsyncOpenSearch, while cached result will be ignored. Defaults to `False`
        """
        # the first successful execution is cached on the instance
        if ignore_cache or not hasattr(self, "_response"):
            opensearch = await get_connection(self._using)
            self._response = self._response_class(
                self,
                await opensearch.search(
                    index=self._index, body=self.to_dict(), **self._params
                ),
            )
        return self._response
    async def scan(self):
        """
        Turn the search into a scan search and return a generator that will
        iterate over all the documents matching the query.
        Use ``params`` method to specify any additional arguments you with to
        pass to the underlying ``async_scan`` helper from ``opensearchpy``
        """
        opensearch = await get_connection(self._using)
        async for hit in aiter(
            async_scan(
                opensearch, query=self.to_dict(), index=self._index, **self._params
            )
        ):
            yield self._get_result(hit)
    async def delete(self):
        """
        delete() executes the query by delegating to delete_by_query()
        """
        opensearch = await get_connection(self._using)
        return AttrDict(
            await opensearch.delete_by_query(
                index=self._index, body=self.to_dict(), **self._params
            )
        )
class AsyncMultiSearch(Request):
    """
    Combine multiple :class:`~opensearchpy.AsyncSearch` objects into a single
    request.
    """
    def __init__(self, **kwargs):
        super(AsyncMultiSearch, self).__init__(**kwargs)
        self._searches = []
    def __getitem__(self, key):
        return self._searches[key]
    def __iter__(self):
        return iter(self._searches)
    def _clone(self):
        # shallow copy; the contained searches themselves are shared
        clone = super(AsyncMultiSearch, self)._clone()
        clone._searches = list(self._searches)
        return clone
    def add(self, search):
        """
        Adds a new :class:`~opensearchpy.AsyncSearch` object to the request::
            ms = AsyncMultiSearch(index='my-index')
            ms = ms.add(AsyncSearch(doc_type=Category).filter('term', category='python'))
            ms = ms.add(AsyncSearch(doc_type=Blog))
        """
        clone = self._clone()
        clone._searches.append(search)
        return clone
    def to_dict(self):
        """Serialize as the msearch body: alternating header and body dicts."""
        serialized = []
        for search in self._searches:
            header = {}
            if search._index:
                header["index"] = search._index
            header.update(search._params)
            serialized.append(header)
            serialized.append(search.to_dict())
        return serialized
    async def execute(self, ignore_cache=False, raise_on_error=True):
        """
        Execute the multi search request and return a list of search results.
        """
        if ignore_cache or not hasattr(self, "_response"):
            client = await get_connection(self._using)
            raw = await client.msearch(
                index=self._index, body=self.to_dict(), **self._params
            )
            results = []
            for search, item in zip(self._searches, raw["responses"]):
                if item.get("error", False):
                    if raise_on_error:
                        raise TransportError("N/A", item["error"]["type"], item["error"])
                    # failed sub-search is represented by None in the output
                    results.append(None)
                else:
                    results.append(Response(search, item))
            self._response = results
        return self._response
from six import iteritems, itervalues
from opensearchpy._async.helpers.search import AsyncSearch
from opensearchpy.helpers.faceted_search import FacetedResponse
from opensearchpy.helpers.query import MatchAll
class AsyncFacetedSearch(object):
    """
    Abstraction for creating faceted navigation searches that takes care of
    composing the queries, aggregations and filters as needed as well as
    presenting the results in an easy-to-consume fashion::

        class BlogSearch(AsyncFacetedSearch):
            index = 'blogs'
            doc_types = [Blog, Post]
            fields = ['title^5', 'category', 'description', 'body']
            facets = {
                'type': TermsFacet(field='_type'),
                'category': TermsFacet(field='category'),
                'weekly_posts': DateHistogramFacet(field='published_from', interval='week')
            }
            def search(self):
                ' Override search to add your own filters '
                s = super(BlogSearch, self).search()
                return s.filter('term', published=True)

        # when using:
        blog_search = BlogSearch("web framework", filters={"category": "python"})
        # supports pagination
        blog_search[10:20]
        response = await blog_search.execute()
        # easy access to aggregation results:
        for category, hit_count, is_selected in response.facets.category:
            print(
                "Category %s has %d hits%s." % (
                    category,
                    hit_count,
                    ' and is chosen' if is_selected else ''
                )
            )
    """

    # class-level configuration, expected to be overridden by subclasses
    index = None
    doc_types = None
    fields = None
    facets = {}
    using = "default"

    def __init__(self, query=None, filters=None, sort=()):
        """
        :arg query: the text to search for
        :arg filters: mapping of facet name -> value (or list of values) to
            filter on; defaults to no filtering
        :arg sort: sort information to be passed to :class:`~opensearchpy.AsyncSearch`
        """
        self._query = query
        self._filters = {}
        self._sort = sort
        self.filter_values = {}
        # NOTE: ``filters`` was previously a mutable default argument (``{}``);
        # a ``None`` default avoids the shared-mutable-default pitfall while
        # remaining backward compatible for every caller.
        for name, value in iteritems(filters or {}):
            self.add_filter(name, value)
        self._s = self.build_search()

    async def count(self):
        """Return the number of hits matching the composed search."""
        return await self._s.count()

    def __getitem__(self, k):
        # Unlike AsyncSearch slicing, this mutates the object in place:
        # the underlying search is replaced with its sliced clone.
        self._s = self._s[k]
        return self

    def __iter__(self):
        return iter(self._s)

    def add_filter(self, name, filter_values):
        """
        Add a filter for a facet.

        :arg name: name of the facet (a key in ``self.facets``)
        :arg filter_values: a single value or a list of values; ``None`` is
            ignored
        """
        # normalize the value into a list
        if not isinstance(filter_values, (tuple, list)):
            if filter_values is None:
                return
            filter_values = [
                filter_values,
            ]
        # remember the filter values for use in FacetedResponse
        self.filter_values[name] = filter_values
        # get the filter from the facet
        f = self.facets[name].add_filter(filter_values)
        if f is None:
            return
        self._filters[name] = f

    def search(self):
        """
        Returns the base Search object to which the facets are added.

        You can customize the query by overriding this method and returning a
        modified search object.
        """
        s = AsyncSearch(doc_type=self.doc_types, index=self.index, using=self.using)
        return s.response_class(FacetedResponse)

    def query(self, search, query):
        """
        Add query part to ``search``.

        Override this if you wish to customize the query used.
        """
        if query:
            if self.fields:
                return search.query("multi_match", fields=self.fields, query=query)
            else:
                return search.query("multi_match", query=query)
        return search

    def aggregate(self, search):
        """
        Add aggregations representing the facets selected, including potential
        filters.
        """
        for f, facet in iteritems(self.facets):
            agg = facet.get_aggregation()
            agg_filter = MatchAll()
            # apply every *other* facet's filter so each facet's counts
            # reflect the rest of the selection but not its own
            for field, filter in iteritems(self._filters):
                if f == field:
                    continue
                agg_filter &= filter
            search.aggs.bucket("_filter_" + f, "filter", filter=agg_filter).bucket(
                f, agg
            )

    def filter(self, search):
        """
        Add a ``post_filter`` to the search request narrowing the results based
        on the facet filters.
        """
        if not self._filters:
            return search
        post_filter = MatchAll()
        for f in itervalues(self._filters):
            post_filter &= f
        return search.post_filter(post_filter)

    def highlight(self, search):
        """
        Add highlighting for all the fields.
        """
        # strip any "^boost" suffix from the field names before highlighting
        return search.highlight(
            *(f if "^" not in f else f.split("^", 1)[0] for f in self.fields)
        )

    def sort(self, search):
        """
        Add sorting information to the request.
        """
        if self._sort:
            search = search.sort(*self._sort)
        return search

    def build_search(self):
        """
        Construct the ``AsyncSearch`` object from query, filters, highlighting,
        sorting and facet aggregations.
        """
        s = self.search()
        s = self.query(s, self._query)
        s = self.filter(s)
        if self.fields:
            s = self.highlight(s)
        s = self.sort(s)
        self.aggregate(s)
        return s

    async def execute(self):
        """
        Execute the search and return the response.
        """
        r = await self._s.execute()
        r._faceted_search = self
        return r
from opensearchpy.connection.async_connections import get_connection
from opensearchpy.helpers.query import Bool, Q
from opensearchpy.helpers.response import UpdateByQueryResponse
from opensearchpy.helpers.search import ProxyDescriptor, QueryProxy, Request
from opensearchpy.helpers.utils import recursive_to_dict
class AsyncUpdateByQuery(Request):
    # descriptor exposing the per-instance ``QueryProxy`` created in __init__
    query = ProxyDescriptor("query")
    def __init__(self, **kwargs):
        """
        Update by query request to opensearch.
        :arg using: `AsyncOpenSearch` instance to use
        :arg index: limit the search to index
        :arg doc_type: only query this type.
        All the parameters supplied (or omitted) at creation type can be later
        overridden by methods (`using`, `index` and `doc_type` respectively).
        """
        super(AsyncUpdateByQuery, self).__init__(**kwargs)
        self._response_class = UpdateByQueryResponse
        self._script = {}
        self._query_proxy = QueryProxy(self, "query")
    def filter(self, *args, **kwargs):
        """Add a query wrapped in a ``bool`` filter (non-scoring) context."""
        return self.query(Bool(filter=[Q(*args, **kwargs)]))
    def exclude(self, *args, **kwargs):
        """Add a negated query wrapped in a ``bool`` filter (non-scoring) context."""
        return self.query(Bool(filter=[~Q(*args, **kwargs)]))
    @classmethod
    def from_dict(cls, d):
        """
        Construct a new `AsyncUpdateByQuery` instance from a raw dict containing the search
        body. Useful when migrating from raw dictionaries.
        Example::
            ubq = AsyncUpdateByQuery.from_dict({
                "query": {
                    "bool": {
                        "must": [...]
                    }
                },
                "script": {...}
            })
            ubq = ubq.filter('term', published=True)
        """
        u = cls()
        u.update_from_dict(d)
        return u
    def _clone(self):
        """
        Return a clone of the current search request. Performs a shallow copy
        of all the underlying objects. Used internally by most state modifying
        APIs.
        """
        ubq = super(AsyncUpdateByQuery, self)._clone()
        ubq._response_class = self._response_class
        ubq._script = self._script.copy()
        ubq.query._proxied = self.query._proxied
        return ubq
    def response_class(self, cls):
        """
        Override the default wrapper used for the response.
        """
        ubq = self._clone()
        ubq._response_class = cls
        return ubq
    def update_from_dict(self, d):
        """
        Apply options from a serialized body to the current instance. Modifies
        the object in-place. Used mostly by ``from_dict``.
        """
        d = d.copy()
        if "query" in d:
            self.query._proxied = Q(d.pop("query"))
        if "script" in d:
            self._script = d.pop("script")
        # anything left over goes verbatim into the request body
        self._extra.update(d)
        return self
    def script(self, **kwargs):
        """
        Define update action to take:
        Note: the API only accepts a single script, so
        calling the script multiple times will overwrite.
        Example::
            ubq = AsyncUpdateByQuery()
            ubq = ubq.script(source="ctx._source.likes++")
            ubq = ubq.script(source="ctx._source.likes += params.f",
                lang="expression",
                params={'f': 3})
        """
        ubq = self._clone()
        # discard any previously-set script before applying the new one
        if ubq._script:
            ubq._script = {}
        ubq._script.update(kwargs)
        return ubq
    def to_dict(self, **kwargs):
        """
        Serialize the search into the dictionary that will be sent over as the
        request's body.
        All additional keyword arguments will be included into the dictionary.
        """
        d = {}
        if self.query:
            d["query"] = self.query.to_dict()
        if self._script:
            d["script"] = self._script
        d.update(recursive_to_dict(self._extra))
        d.update(recursive_to_dict(kwargs))
        return d
    async def execute(self):
        """
        Execute the search and return an instance of ``Response`` wrapping all
        the data.
        """
        opensearch = await get_connection(self._using)
        self._response = self._response_class(
            self,
            await opensearch.update_by_query(
                index=self._index, body=self.to_dict(), **self._params
            ),
        )
        return self._response
from ..client.utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class IndexManagementClient(NamespacedClient):
    """
    Async wrapper around the OpenSearch Index State Management (ISM) plugin
    REST API (``_plugins/_ism/...``).
    """
    @query_params()
    async def put_policy(self, policy, body=None, params=None, headers=None):
        """
        Creates, or updates, a policy.
        :arg policy: The name of the policy
        :arg body: The policy definition
        """
        if policy in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'policy'.")
        return await self.transport.perform_request(
            "PUT",
            _make_path("_plugins", "_ism", "policies", policy),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def add_policy(self, index, body=None, params=None, headers=None):
        """
        Adds a policy to an index. This operation does not change the policy if the index already has one.
        :arg index: The name of the index to add policy on
        :arg body: The policy to attach
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_ism", "add", index),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def get_policy(self, policy, params=None, headers=None):
        """
        Gets the policy by `policy_id`.
        :arg policy: The name of the policy
        """
        if policy in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'policy'.")
        return await self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_ism", "policies", policy),
            params=params,
            headers=headers,
        )
    @query_params()
    async def remove_policy_from_index(self, index, params=None, headers=None):
        """
        Removes any ISM policy from the index.
        :arg index: The name of the index to remove policy on
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_ism", "remove", index),
            params=params,
            headers=headers,
        )
    @query_params()
    async def change_policy(self, index, body=None, params=None, headers=None):
        """
        Updates the managed index policy to a new policy (or to a new version of the policy).
        :arg index: The name of the index to change policy on
        :arg body: The new policy binding
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_ism", "change_policy", index),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def retry(self, index, body=None, params=None, headers=None):
        """
        Retries the failed action for an index.
        :arg index: The name of the index which is in a failed state
        :arg body: Optional request body (e.g. the state to retry from)
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_ism", "retry", index),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params("show_policy")
    async def explain_index(self, index, params=None, headers=None):
        """
        Gets the current state of the index.
        :arg index: The name of the index to explain
        :arg show_policy: If true, include the policy attached to the index
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return await self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_ism", "explain", index),
            params=params,
            headers=headers,
        )
    @query_params()
    async def delete_policy(self, policy, params=None, headers=None):
        """
        Deletes the policy by `policy_id`.
        :arg policy: The name of the policy to delete
        """
        if policy in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'policy'.")
        return await self.transport.perform_request(
            "DELETE",
            _make_path("_plugins", "_ism", "policies", policy),
            params=params,
            headers=headers,
        )
from ..client.utils import NamespacedClient, _make_path, query_params
class AlertingClient(NamespacedClient):
    """
    Async wrapper around the OpenSearch Alerting plugin REST API
    (``_plugins/_alerting/...``): monitors, destinations and alerts.
    """
    @query_params()
    async def search_monitor(self, body, params=None, headers=None):
        """
        Returns the search result for a monitor.
        :arg body: The search query describing the monitors we are trying to find
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_alerting", "monitors", "_search"),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def get_monitor(self, monitor_id, params=None, headers=None):
        """
        Returns the details of a specific monitor.
        :arg monitor_id: The id of the monitor we are trying to fetch
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_alerting", "monitors", monitor_id),
            params=params,
            headers=headers,
        )
    @query_params("dryrun")
    async def run_monitor(self, monitor_id, params=None, headers=None):
        """
        Runs/Executes a specific monitor.
        :arg monitor_id: The id of the monitor we are trying to execute
        :arg dryrun: Shows the results of a run without the actions sending any message
        """
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_alerting", "monitors", monitor_id, "_execute"),
            params=params,
            headers=headers,
        )
    @query_params()
    async def create_monitor(self, body=None, params=None, headers=None):
        """
        Creates a monitor with inputs, triggers, and actions.
        :arg body: The configuration for the monitor (`inputs`, `triggers`, and `actions`)
        """
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_alerting", "monitors"),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def update_monitor(self, monitor_id, body=None, params=None, headers=None):
        """
        Updates a monitor's inputs, triggers, and actions.
        :arg monitor_id: The id of the monitor we are trying to update
        :arg body: The configuration for the monitor (`inputs`, `triggers`, and `actions`)
        """
        return await self.transport.perform_request(
            "PUT",
            _make_path("_plugins", "_alerting", "monitors", monitor_id),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def delete_monitor(self, monitor_id, params=None, headers=None):
        """
        Deletes a specific monitor.
        :arg monitor_id: The id of the monitor we are trying to delete
        """
        return await self.transport.perform_request(
            "DELETE",
            _make_path("_plugins", "_alerting", "monitors", monitor_id),
            params=params,
            headers=headers,
        )
    @query_params()
    async def get_destination(self, destination_id=None, params=None, headers=None):
        """
        Returns the details of a specific destination.
        :arg destination_id: The id of the destination we are trying to fetch. If None, returns all destinations
        """
        # Without an id the collection endpoint is used, listing every destination.
        return await self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_alerting", "destinations", destination_id)
            if destination_id
            else _make_path("_plugins", "_alerting", "destinations"),
            params=params,
            headers=headers,
        )
    @query_params()
    async def create_destination(self, body=None, params=None, headers=None):
        """
        Creates a destination for slack, mail, or custom-webhook.
        :arg body: The configuration for the destination
        """
        return await self.transport.perform_request(
            "POST",
            _make_path("_plugins", "_alerting", "destinations"),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def update_destination(
        self, destination_id, body=None, params=None, headers=None
    ):
        """
        Updates a destination's inputs, triggers, and actions.
        :arg destination_id: The id of the destination we are trying to update
        :arg body: The configuration for the destination
        """
        return await self.transport.perform_request(
            "PUT",
            _make_path("_plugins", "_alerting", "destinations", destination_id),
            params=params,
            headers=headers,
            body=body,
        )
    @query_params()
    async def delete_destination(self, destination_id, params=None, headers=None):
        """
        Deletes a specific destination.
        :arg destination_id: The id of the destination we are trying to delete
        """
        return await self.transport.perform_request(
            "DELETE",
            _make_path("_plugins", "_alerting", "destinations", destination_id),
            params=params,
            headers=headers,
        )
    @query_params()
    async def get_alerts(self, params=None, headers=None):
        """
        Returns all alerts.
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_plugins", "_alerting", "monitors", "alerts"),
            params=params,
            headers=headers,
        )
    @query_params()
    async def acknowledge_alert(self, monitor_id, body=None, params=None, headers=None):
        """
        Acknowledges an alert.
        :arg monitor_id: The id of the monitor, the alert belongs to
        :arg body: The alerts to be acknowledged
        """
        return await self.transport.perform_request(
            "POST",
            _make_path(
                "_plugins",
                "_alerting",
                "monitors",
                monitor_id,
                "_acknowledge",
                "alerts",
            ),
            params=params,
            headers=headers,
            body=body,
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class SnapshotClient(NamespacedClient):
    """
    Client for the snapshot APIs (``/_snapshot``): managing snapshot
    repositories and creating, restoring, inspecting, cloning, analyzing,
    and deleting snapshots. Each method is a thin async wrapper that builds
    the URL with ``_make_path`` and delegates to ``self.transport``.
    """

    @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion")
    async def create(self, repository, snapshot, body=None, params=None, headers=None):
        """
        Creates a snapshot in a repository.

        :arg repository: A repository name
        :arg snapshot: A snapshot name
        :arg body: The snapshot definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg wait_for_completion: Should this request wait until the
            operation has completed before returning
        """
        # Both path components are mandatory; SKIP_IN_PATH matches empty
        # values that _make_path would otherwise silently drop from the URL.
        for param in (repository, snapshot):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "PUT",
            _make_path("_snapshot", repository, snapshot),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("master_timeout", "cluster_manager_timeout")
    async def delete(self, repository, snapshot, params=None, headers=None):
        """
        Deletes a snapshot.

        :arg repository: A repository name
        :arg snapshot: A snapshot name
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        for param in (repository, snapshot):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "DELETE",
            _make_path("_snapshot", repository, snapshot),
            params=params,
            headers=headers,
        )

    @query_params(
        "ignore_unavailable",
        "include_repository",
        "index_details",
        "master_timeout",
        "cluster_manager_timeout",
        "verbose",
    )
    async def get(self, repository, snapshot, params=None, headers=None):
        """
        Returns information about a snapshot.

        :arg repository: A repository name
        :arg snapshot: A comma-separated list of snapshot names
        :arg ignore_unavailable: Whether to ignore unavailable
            snapshots, defaults to false which means a SnapshotMissingException is
            thrown
        :arg include_repository: Whether to include the repository name
            in the snapshot info. Defaults to true.
        :arg index_details: Whether to include details of each index in
            the snapshot, if those details are available. Defaults to false.
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg verbose: Whether to show verbose snapshot info or only show
            the basic info found in the repository index blob
        """
        for param in (repository, snapshot):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "GET",
            _make_path("_snapshot", repository, snapshot),
            params=params,
            headers=headers,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    async def delete_repository(self, repository, params=None, headers=None):
        """
        Deletes a repository.

        :arg repository: Name of the snapshot repository to unregister.
            Wildcard (`*`) patterns are supported.
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        return await self.transport.perform_request(
            "DELETE",
            _make_path("_snapshot", repository),
            params=params,
            headers=headers,
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    async def get_repository(self, repository=None, params=None, headers=None):
        """
        Returns information about a repository.

        :arg repository: A comma-separated list of repository names.
            When None, _make_path omits it and all repositories are returned.
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        return await self.transport.perform_request(
            "GET", _make_path("_snapshot", repository), params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout", "verify")
    async def create_repository(self, repository, body, params=None, headers=None):
        """
        Creates a repository.

        :arg repository: A repository name
        :arg body: The repository definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        :arg verify: Whether to verify the repository after creation
        """
        # body is validated alongside repository even though it is not a path
        # component: the API requires a repository definition payload.
        for param in (repository, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "PUT",
            _make_path("_snapshot", repository),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion")
    async def restore(self, repository, snapshot, body=None, params=None, headers=None):
        """
        Restores a snapshot.

        :arg repository: A repository name
        :arg snapshot: A snapshot name
        :arg body: Details of what to restore
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg wait_for_completion: Should this request wait until the
            operation has completed before returning
        """
        for param in (repository, snapshot):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_snapshot", repository, snapshot, "_restore"),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("ignore_unavailable", "master_timeout", "cluster_manager_timeout")
    async def status(self, repository=None, snapshot=None, params=None, headers=None):
        """
        Returns information about the status of a snapshot.

        :arg repository: A repository name. When None, _make_path omits
            it along with ``snapshot`` and the status of all currently
            running snapshots is returned.
        :arg snapshot: A comma-separated list of snapshot names
        :arg ignore_unavailable: Whether to ignore unavailable
            snapshots, defaults to false which means a SnapshotMissingException is
            thrown
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_snapshot", repository, snapshot, "_status"),
            params=params,
            headers=headers,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    async def verify_repository(self, repository, params=None, headers=None):
        """
        Verifies a repository.

        :arg repository: A repository name
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_snapshot", repository, "_verify"),
            params=params,
            headers=headers,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    async def cleanup_repository(self, repository, params=None, headers=None):
        """
        Removes stale data from repository.

        :arg repository: A repository name
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_snapshot", repository, "_cleanup"),
            params=params,
            headers=headers,
        )

    @query_params("master_timeout", "cluster_manager_timeout")
    async def clone(
        self, repository, snapshot, target_snapshot, body, params=None, headers=None
    ):
        """
        Clones indices from one snapshot into another snapshot in the same repository.

        :arg repository: A repository name
        :arg snapshot: The name of the snapshot to clone from
        :arg target_snapshot: The name of the cloned snapshot to create
        :arg body: The snapshot clone definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        for param in (repository, snapshot, target_snapshot, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "PUT",
            _make_path("_snapshot", repository, snapshot, "_clone", target_snapshot),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params(
        "blob_count",
        "concurrency",
        "detailed",
        "early_read_node_count",
        "max_blob_size",
        "max_total_data_size",
        "rare_action_probability",
        "rarely_abort_writes",
        "read_node_count",
        "seed",
        "timeout",
    )
    async def repository_analyze(self, repository, params=None, headers=None):
        """
        Analyzes a repository for correctness and performance

        :arg repository: A repository name
        :arg blob_count: Number of blobs to create during the test.
            Defaults to 100.
        :arg concurrency: Number of operations to run concurrently
            during the test. Defaults to 10.
        :arg detailed: Whether to return detailed results or a summary.
            Defaults to 'false' so that only the summary is returned.
        :arg early_read_node_count: Number of nodes on which to perform
            an early read on a blob, i.e. before writing has completed. Early reads
            are rare actions so the 'rare_action_probability' parameter is also
            relevant. Defaults to 2.
        :arg max_blob_size: Maximum size of a blob to create during the
            test, e.g '1gb' or '100mb'. Defaults to '10mb'.
        :arg max_total_data_size: Maximum total size of all blobs to
            create during the test, e.g '1tb' or '100gb'. Defaults to '1gb'.
        :arg rare_action_probability: Probability of taking a rare
            action such as an early read or an overwrite. Defaults to 0.02.
        :arg rarely_abort_writes: Whether to rarely abort writes before
            they complete. Defaults to 'true'.
        :arg read_node_count: Number of nodes on which to read a blob
            after writing. Defaults to 10.
        :arg seed: Seed for the random number generator used to create
            the test workload. Defaults to a random value.
        :arg timeout: Explicit operation timeout. Defaults to '30s'.
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        return await self.transport.perform_request(
            "POST",
            _make_path("_snapshot", repository, "_analyze"),
            params=params,
            headers=headers,
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class DanglingIndicesClient(NamespacedClient):
    """
    Client for the dangling-indices APIs (``/_dangling``): list, import,
    or delete indices whose data is present on disk but absent from the
    cluster state.
    """

    @query_params(
        "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout"
    )
    async def delete_dangling_index(self, index_uuid, params=None, headers=None):
        """
        Deletes the specified dangling index

        :arg index_uuid: The UUID of the dangling index
        :arg accept_data_loss: Must be set to true in order to delete
            the dangling index
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if index_uuid in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index_uuid'.")
        target = _make_path("_dangling", index_uuid)
        return await self.transport.perform_request(
            "DELETE", target, params=params, headers=headers
        )

    @query_params(
        "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout"
    )
    async def import_dangling_index(self, index_uuid, params=None, headers=None):
        """
        Imports the specified dangling index

        :arg index_uuid: The UUID of the dangling index
        :arg accept_data_loss: Must be set to true in order to import
            the dangling index
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if index_uuid in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index_uuid'.")
        target = _make_path("_dangling", index_uuid)
        return await self.transport.perform_request(
            "POST", target, params=params, headers=headers
        )

    @query_params()
    async def list_dangling_indices(self, params=None, headers=None):
        """
        Returns all dangling indices.
        """
        return await self.transport.perform_request(
            "GET", "/_dangling", params=params, headers=headers
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class ClusterClient(NamespacedClient):
    """
    Client for the cluster-level APIs (``/_cluster``, ``/_component_template``,
    ``/_remote``): health, state, stats, settings, shard allocation, component
    templates, and voting-configuration exclusions. Each method is a thin
    async wrapper that builds the URL with ``_make_path`` and delegates to
    ``self.transport``.
    """

    @query_params(
        "expand_wildcards",
        "level",
        "local",
        "master_timeout",
        "cluster_manager_timeout",
        "timeout",
        "wait_for_active_shards",
        "wait_for_events",
        "wait_for_no_initializing_shards",
        "wait_for_no_relocating_shards",
        "wait_for_nodes",
        "wait_for_status",
    )
    async def health(self, index=None, params=None, headers=None):
        """
        Returns basic information about the health of the cluster.

        :arg index: Limit the information returned to a specific index
        :arg expand_wildcards: Whether to expand wildcard expression to
            concrete indices that are open, closed or both. Valid choices: open,
            closed, hidden, none, all Default: all
        :arg level: Specify the level of detail for returned information
            Valid choices: cluster, indices, shards Default: cluster
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        :arg wait_for_active_shards: Wait until the specified number of
            shards is active
        :arg wait_for_events: Wait until all currently queued events
            with the given priority are processed Valid choices: immediate, urgent,
            high, normal, low, languid
        :arg wait_for_no_initializing_shards: Whether to wait until
            there are no initializing shards in the cluster
        :arg wait_for_no_relocating_shards: Whether to wait until there
            are no relocating shards in the cluster
        :arg wait_for_nodes: Wait until the specified number of nodes is
            available
        :arg wait_for_status: Wait until cluster is in a specific state
            Valid choices: green, yellow, red
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_cluster", "health", index),
            params=params,
            headers=headers,
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    async def pending_tasks(self, params=None, headers=None):
        """
        Returns a list of any cluster-level changes (e.g. create index, update mapping,
        allocate or fail shard) which have not yet been executed.

        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
        """
        return await self.transport.perform_request(
            "GET", "/_cluster/pending_tasks", params=params, headers=headers
        )

    @query_params(
        "allow_no_indices",
        "expand_wildcards",
        "flat_settings",
        "ignore_unavailable",
        "local",
        "master_timeout",
        "cluster_manager_timeout",
        "wait_for_metadata_version",
        "wait_for_timeout",
    )
    async def state(self, metric=None, index=None, params=None, headers=None):
        """
        Returns a comprehensive information about the state of the cluster.

        :arg metric: Limit the information returned to the specified
            metrics Valid choices: _all, blocks, metadata, nodes, routing_table,
            routing_nodes, master_node, version
        :arg index: A comma-separated list of index names; use `_all` or
            empty string to perform the operation on all indices
        :arg allow_no_indices: Whether to ignore if a wildcard indices
            expression resolves into no concrete indices. (This includes `_all`
            string or when no indices have been specified)
        :arg expand_wildcards: Whether to expand wildcard expression to
            concrete indices that are open, closed or both. Valid choices: open,
            closed, hidden, none, all Default: open
        :arg flat_settings: Return settings in flat format (default:
            false)
        :arg ignore_unavailable: Whether specified concrete indices
            should be ignored when unavailable (missing or closed)
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
        :arg wait_for_metadata_version: Wait for the metadata version to
            be equal or greater than the specified metadata version
        :arg wait_for_timeout: The maximum time to wait for
            wait_for_metadata_version before timing out
        """
        # When an index is given but no metric is, a wildcard metric must be
        # inserted; otherwise _make_path would place the index name in the
        # metric slot of /_cluster/state/{metric}/{index}.
        if index and metric in SKIP_IN_PATH:
            metric = "_all"
        return await self.transport.perform_request(
            "GET",
            _make_path("_cluster", "state", metric, index),
            params=params,
            headers=headers,
        )

    @query_params("flat_settings", "timeout")
    async def stats(self, node_id=None, params=None, headers=None):
        """
        Returns high-level overview of cluster statistics.

        :arg node_id: A comma-separated list of node IDs or names to
            limit the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from all
            nodes
        :arg flat_settings: Return settings in flat format (default:
            false)
        :arg timeout: Explicit operation timeout
        """
        # Without a node filter the bare /_cluster/stats endpoint is used;
        # with one, the /nodes/{node_id} form scopes the statistics.
        return await self.transport.perform_request(
            "GET",
            "/_cluster/stats"
            if node_id in SKIP_IN_PATH
            else _make_path("_cluster", "stats", "nodes", node_id),
            params=params,
            headers=headers,
        )

    @query_params(
        "dry_run",
        "explain",
        "master_timeout",
        "cluster_manager_timeout",
        "metric",
        "retry_failed",
        "timeout",
    )
    async def reroute(self, body=None, params=None, headers=None):
        """
        Allows to manually change the allocation of individual shards in the cluster.

        :arg body: The definition of `commands` to perform (`move`,
            `cancel`, `allocate`)
        :arg dry_run: Simulate the operation only and return the
            resulting state
        :arg explain: Return an explanation of why the commands can or
            cannot be executed
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg metric: Limit the information returned to the specified
            metrics. Defaults to all but metadata Valid choices: _all, blocks,
            metadata, nodes, routing_table, master_node, version
        :arg retry_failed: Retries allocation of shards that are blocked
            due to too many subsequent allocation failures
        :arg timeout: Explicit operation timeout
        """
        return await self.transport.perform_request(
            "POST", "/_cluster/reroute", params=params, headers=headers, body=body
        )

    @query_params(
        "flat_settings",
        "include_defaults",
        "master_timeout",
        "cluster_manager_timeout",
        "timeout",
    )
    async def get_settings(self, params=None, headers=None):
        """
        Returns cluster settings.

        :arg flat_settings: Return settings in flat format (default:
            false)
        :arg include_defaults: Whether to return all default clusters
            setting.
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        return await self.transport.perform_request(
            "GET", "/_cluster/settings", params=params, headers=headers
        )

    @query_params(
        "flat_settings", "master_timeout", "cluster_manager_timeout", "timeout"
    )
    async def put_settings(self, body, params=None, headers=None):
        """
        Updates the cluster settings.

        :arg body: The settings to be updated. Can be either `transient`
            or `persistent` (survives cluster restart).
        :arg flat_settings: Return settings in flat format (default:
            false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")
        return await self.transport.perform_request(
            "PUT", "/_cluster/settings", params=params, headers=headers, body=body
        )

    @query_params()
    async def remote_info(self, params=None, headers=None):
        """
        Returns the information about configured remote clusters.
        """
        return await self.transport.perform_request(
            "GET", "/_remote/info", params=params, headers=headers
        )

    @query_params("include_disk_info", "include_yes_decisions")
    async def allocation_explain(self, body=None, params=None, headers=None):
        """
        Provides explanations for shard allocations in the cluster.

        :arg body: The index, shard, and primary flag to explain. Empty
            means 'explain the first unassigned shard'
        :arg include_disk_info: Return information about disk usage and
            shard sizes (default: false)
        :arg include_yes_decisions: Return 'YES' decisions in
            explanation (default: false)
        """
        return await self.transport.perform_request(
            "POST",
            "/_cluster/allocation/explain",
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    async def delete_component_template(self, name, params=None, headers=None):
        """
        Deletes a component template

        :arg name: The name of the template
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if name in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'name'.")
        return await self.transport.perform_request(
            "DELETE",
            _make_path("_component_template", name),
            params=params,
            headers=headers,
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    async def get_component_template(self, name=None, params=None, headers=None):
        """
        Returns one or more component templates

        :arg name: The comma separated names of the component templates.
            When None, _make_path omits it and all templates are returned.
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_component_template", name),
            params=params,
            headers=headers,
        )

    @query_params("create", "master_timeout", "cluster_manager_timeout", "timeout")
    async def put_component_template(self, name, body, params=None, headers=None):
        """
        Creates or updates a component template

        :arg name: The name of the template
        :arg body: The template definition
        :arg create: Whether the index template should only be added if
            new or can also replace an existing one
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
        :arg timeout: Explicit operation timeout
        """
        for param in (name, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return await self.transport.perform_request(
            "PUT",
            _make_path("_component_template", name),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    async def exists_component_template(self, name, params=None, headers=None):
        """
        Returns information about whether a particular component template exist

        :arg name: The name of the template
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
            to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        if name in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'name'.")
        return await self.transport.perform_request(
            "HEAD",
            _make_path("_component_template", name),
            params=params,
            headers=headers,
        )

    @query_params("wait_for_removal")
    async def delete_voting_config_exclusions(self, params=None, headers=None):
        """
        Clears cluster voting config exclusions.

        :arg wait_for_removal: Specifies whether to wait for all
            excluded nodes to be removed from the cluster before clearing the voting
            configuration exclusions list. Default: True
        """
        return await self.transport.perform_request(
            "DELETE",
            "/_cluster/voting_config_exclusions",
            params=params,
            headers=headers,
        )

    @query_params("node_ids", "node_names", "timeout")
    async def post_voting_config_exclusions(self, params=None, headers=None):
        """
        Updates the cluster voting config exclusions by node ids or node names.

        :arg node_ids: A comma-separated list of the persistent ids of
            the nodes to exclude from the voting configuration. If specified, you
            may not also specify ?node_names.
        :arg node_names: A comma-separated list of the names of the
            nodes to exclude from the voting configuration. If specified, you may
            not also specify ?node_ids.
        :arg timeout: Explicit operation timeout Default: 30s
        """
        return await self.transport.perform_request(
            "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers
        )
from .utils import NamespacedClient, _make_path, query_params
class NodesClient(NamespacedClient):
    """
    Client for the nodes APIs (``/_nodes``): node info, statistics, hot
    threads, REST-action usage, and secure-settings reload. Each method is a
    thin async wrapper that builds the URL with ``_make_path`` and delegates
    to ``self.transport``.
    """

    @query_params("timeout")
    async def reload_secure_settings(
        self, body=None, node_id=None, params=None, headers=None
    ):
        """
        Reloads secure settings.

        :arg body: An object containing the password for the
            opensearch keystore
        :arg node_id: A comma-separated list of node IDs to span the
            reload/reinit call. Should stay empty because reloading usually involves
            all cluster nodes.
        :arg timeout: Explicit operation timeout
        """
        return await self.transport.perform_request(
            "POST",
            _make_path("_nodes", node_id, "reload_secure_settings"),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("flat_settings", "timeout")
    async def info(self, node_id=None, metric=None, params=None, headers=None):
        """
        Returns information about nodes in the cluster.

        :arg node_id: A comma-separated list of node IDs or names to
            limit the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from all
            nodes
        :arg metric: A comma-separated list of metrics you wish
            returned. Leave empty to return all. Valid choices: settings, os,
            process, jvm, thread_pool, transport, http, plugins, ingest
        :arg flat_settings: Return settings in flat format (default:
            false)
        :arg timeout: Explicit operation timeout
        """
        return await self.transport.perform_request(
            "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers
        )

    @query_params(
        "completion_fields",
        "fielddata_fields",
        "fields",
        "groups",
        "include_segment_file_sizes",
        "include_unloaded_segments",
        "level",
        "timeout",
        "types",
    )
    async def stats(
        self, node_id=None, metric=None, index_metric=None, params=None, headers=None
    ):
        """
        Returns statistical information about nodes in the cluster.

        :arg node_id: A comma-separated list of node IDs or names to
            limit the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from all
            nodes
        :arg metric: Limit the information returned to the specified
            metrics Valid choices: _all, breaker, fs, http, indices, jvm, os,
            process, thread_pool, transport, discovery, indexing_pressure
        :arg index_metric: Limit the information returned for `indices`
            metric to the specific index metrics. Isn't used if `indices` (or `all`)
            metric isn't specified. Valid choices: _all, completion, docs,
            fielddata, query_cache, flush, get, indexing, merge, request_cache,
            refresh, search, segments, store, warmer, suggest
        :arg completion_fields: A comma-separated list of fields for
            `fielddata` and `suggest` index metric (supports wildcards)
        :arg fielddata_fields: A comma-separated list of fields for
            `fielddata` index metric (supports wildcards)
        :arg fields: A comma-separated list of fields for `fielddata`
            and `completion` index metric (supports wildcards)
        :arg groups: A comma-separated list of search groups for
            `search` index metric
        :arg include_segment_file_sizes: Whether to report the
            aggregated disk usage of each one of the Lucene index files (only
            applies if segment stats are requested)
        :arg include_unloaded_segments: If set to true segment stats
            will include stats for segments that are not currently loaded into
            memory
        :arg level: Return indices stats aggregated at index, node or
            shard level Valid choices: indices, node, shards Default: node
        :arg timeout: Explicit operation timeout
        :arg types: A comma-separated list of document types for the
            `indexing` index metric
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "stats", metric, index_metric),
            params=params,
            headers=headers,
        )

    @query_params(
        "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout"
    )
    async def hot_threads(self, node_id=None, params=None, headers=None):
        """
        Returns information about hot threads on each node in the cluster.

        :arg node_id: A comma-separated list of node IDs or names to
            limit the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from all
            nodes
        :arg doc_type: The type to sample (default: cpu) Valid choices:
            cpu, wait, block
        :arg ignore_idle_threads: Don't show threads that are in known-
            idle places, such as waiting on a socket select or pulling from an empty
            task queue (default: true)
        :arg interval: The interval for the second sampling of threads
        :arg snapshots: Number of samples of thread stacktrace (default:
            10)
        :arg threads: Specify the number of threads to provide
            information for (default: 3)
        :arg timeout: Explicit operation timeout
        """
        # `type` is a reserved word, so callers pass `doc_type`; translate it
        # back to the wire parameter name here. The `params and` guard keeps a
        # direct call with the default params=None from raising TypeError on
        # the membership test (the query_params decorator normally supplies a
        # dict, but the signature advertises None).
        if params and "doc_type" in params:
            params["type"] = params.pop("doc_type")
        return await self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "hot_threads"),
            params=params,
            headers=headers,
        )

    @query_params("timeout")
    async def usage(self, node_id=None, metric=None, params=None, headers=None):
        """
        Returns low-level information about REST actions usage on nodes.

        :arg node_id: A comma-separated list of node IDs or names to
            limit the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from all
            nodes
        :arg metric: Limit the information returned to the specified
            metrics Valid choices: _all, rest_actions
        :arg timeout: Explicit operation timeout
        """
        return await self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "usage", metric),
            params=params,
            headers=headers,
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class IngestClient(NamespacedClient):
    """Namespaced client wrapping the `_ingest` REST endpoints."""

    @query_params("master_timeout", "cluster_manager_timeout", "summary")
    async def get_pipeline(self, id=None, params=None, headers=None):
        """
        Fetch one or more ingest pipelines.

        :arg id: Comma-separated pipeline ids; wildcards are supported.
        :arg master_timeout (Deprecated: use cluster_manager_timeout):
            Explicit operation timeout for the connection to the master node.
        :arg cluster_manager_timeout: Explicit operation timeout for the
            connection to the cluster-manager node.
        :arg summary: Return pipelines without their definitions
            (default: false).
        """
        path = _make_path("_ingest", "pipeline", id)
        return await self.transport.perform_request(
            "GET", path, params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    async def put_pipeline(self, id, body, params=None, headers=None):
        """
        Create or update an ingest pipeline.

        :arg id: Pipeline ID.
        :arg body: The ingest definition.
        :arg master_timeout (Deprecated: use cluster_manager_timeout):
            Explicit operation timeout for the connection to the master node.
        :arg cluster_manager_timeout: Explicit operation timeout for the
            connection to the cluster-manager node.
        :arg timeout: Explicit operation timeout.
        """
        if any(arg in SKIP_IN_PATH for arg in (id, body)):
            raise ValueError("Empty value passed for a required argument.")
        path = _make_path("_ingest", "pipeline", id)
        return await self.transport.perform_request(
            "PUT", path, params=params, headers=headers, body=body
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    async def delete_pipeline(self, id, params=None, headers=None):
        """
        Delete an ingest pipeline.

        :arg id: Pipeline ID.
        :arg master_timeout (Deprecated: use cluster_manager_timeout):
            Explicit operation timeout for the connection to the master node.
        :arg cluster_manager_timeout: Explicit operation timeout for the
            connection to the cluster-manager node.
        :arg timeout: Explicit operation timeout.
        """
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'id'.")
        path = _make_path("_ingest", "pipeline", id)
        return await self.transport.perform_request(
            "DELETE", path, params=params, headers=headers
        )

    @query_params("verbose")
    async def simulate(self, body, id=None, params=None, headers=None):
        """
        Simulate a pipeline against example documents.

        :arg body: The simulate definition.
        :arg id: Pipeline ID.
        :arg verbose: Verbose mode; display data output for each processor
            in the executed pipeline.
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")
        path = _make_path("_ingest", "pipeline", id, "_simulate")
        return await self.transport.perform_request(
            "POST", path, params=params, headers=headers, body=body
        )

    @query_params()
    async def processor_grok(self, params=None, headers=None):
        """
        List the built-in grok patterns.
        """
        return await self.transport.perform_request(
            "GET", "/_ingest/processor/grok", params=params, headers=headers
        )

    @query_params()
    async def geo_ip_stats(self, params=None, headers=None):
        """
        Return statistical information about geoip databases.
        """
        return await self.transport.perform_request(
            "GET", "/_ingest/geoip/stats", params=params, headers=headers
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class IndicesClient(NamespacedClient):
@query_params()
async def analyze(self, body=None, index=None, params=None, headers=None):
"""
Performs the analysis process on a text and return the tokens breakdown of the
text.
:arg body: Define analyzer/tokenizer parameters and the text on
which the analysis should be performed
:arg index: The name of the index to scope the operation
"""
return await self.transport.perform_request(
"POST",
_make_path(index, "_analyze"),
params=params,
headers=headers,
body=body,
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
async def refresh(self, index=None, params=None, headers=None):
"""
Performs the refresh operation in one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
"""
return await self.transport.perform_request(
"POST", _make_path(index, "_refresh"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"force",
"ignore_unavailable",
"wait_if_ongoing",
)
async def flush(self, index=None, params=None, headers=None):
"""
Performs the flush operation on one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string for all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg force: Whether a flush should be forced even if it is not
necessarily needed ie. if no changes will be committed to the index.
This is useful if transaction log IDs should be incremented even if no
uncommitted changes are present. (This setting can be considered as
internal)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg wait_if_ongoing: If set to true the flush operation will
block until the flush can be executed if another flush operation is
already executing. The default is true. If set to false the flush will
be skipped iff if another flush operation is already running.
"""
return await self.transport.perform_request(
"POST", _make_path(index, "_flush"), params=params, headers=headers
)
@query_params(
"master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards"
)
async def create(self, index, body=None, params=None, headers=None):
"""
Creates an index with optional settings and mappings.
:arg index: The name of the index
:arg body: The configuration for the index (`settings` and
`mappings`)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"PUT", _make_path(index), params=params, headers=headers, body=body
)
@query_params(
"master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards"
)
async def clone(self, index, target, body=None, params=None, headers=None):
"""
Clones an index
:arg index: The name of the source index to clone
:arg target: The name of the target index to clone into
:arg body: The configuration for the target index (`settings`
and `aliases`)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for on the cloned index before the operation returns.
"""
for param in (index, target):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return await self.transport.perform_request(
"PUT",
_make_path(index, "_clone", target),
params=params,
headers=headers,
body=body,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"include_defaults",
"local",
"master_timeout",
"cluster_manager_timeout",
)
async def get(self, index, params=None, headers=None):
"""
Returns information about one or more indices.
:arg index: A comma-separated list of index names
:arg allow_no_indices: Ignore if a wildcard expression resolves
to no concrete indices (default: false)
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Ignore unavailable indexes (default:
false)
:arg include_defaults: Whether to return all default setting for
each of the indices.
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"GET", _make_path(index), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
async def open(self, index, params=None, headers=None):
"""
Opens an index.
:arg index: A comma separated list of indices to open
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: closed
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of active shards to
wait for before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"POST", _make_path(index, "_open"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
async def close(self, index, params=None, headers=None):
"""
Closes an index.
:arg index: A comma separated list of indices to close
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of active shards to
wait for before the operation returns. Set to `index-setting` to wait
according to the index setting `index.write.wait_for_active_shards`, or
`all` to wait for all shards, or an integer. Defaults to `0`.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"POST", _make_path(index, "_close"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
)
async def delete(self, index, params=None, headers=None):
"""
Deletes an index.
:arg index: A comma-separated list of indices to delete; use
`_all` or `*` string to delete all indices
:arg allow_no_indices: Ignore if a wildcard expression resolves
to no concrete indices (default: false)
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Ignore unavailable indexes (default:
false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"DELETE", _make_path(index), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"include_defaults",
"local",
)
async def exists(self, index, params=None, headers=None):
"""
Returns information about whether a particular index exists.
:arg index: A comma-separated list of index names
:arg allow_no_indices: Ignore if a wildcard expression resolves
to no concrete indices (default: false)
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Ignore unavailable indexes (default:
false)
:arg include_defaults: Whether to return all default setting for
each of the indices.
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"HEAD", _make_path(index), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"write_index_only",
)
async def put_mapping(self, body, index=None, params=None, headers=None):
"""
Updates the index mappings.
:arg body: The mapping definition
:arg index: A comma-separated list of index names the mapping
should be added to (supports wildcards); use `_all` or omit to add the
mapping on all indices.
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg write_index_only: When true, applies mappings only to the
write index of an alias or data stream
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return await self.transport.perform_request(
"PUT",
_make_path(index, "_mapping"),
params=params,
headers=headers,
body=body,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"local",
"master_timeout",
"cluster_manager_timeout",
)
async def get_mapping(self, index=None, params=None, headers=None):
"""
Returns mappings for one or more indices.
:arg index: A comma-separated list of index names
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
return await self.transport.perform_request(
"GET",
_make_path(index, "_mapping"),
params=params,
headers=headers,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"include_defaults",
"local",
)
async def get_field_mapping(self, fields, index=None, params=None, headers=None):
"""
Returns mapping for one or more fields.
:arg fields: A comma-separated list of fields
:arg index: A comma-separated list of index names
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg include_defaults: Whether the default mapping values should
be returned as well
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
if fields in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'fields'.")
return await self.transport.perform_request(
"GET",
_make_path(index, "_mapping", "field", fields),
params=params,
headers=headers,
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
async def put_alias(self, index, name, body=None, params=None, headers=None):
"""
Creates or updates an alias.
:arg index: A comma-separated list of index names the alias
should point to (supports wildcards); use `_all` to perform the
operation on all indices.
:arg name: The name of the alias to be created or updated
:arg body: The settings for the alias, such as `routing` or
`filter`
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit timestamp for the document
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return await self.transport.perform_request(
"PUT",
_make_path(index, "_alias", name),
params=params,
headers=headers,
body=body,
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
async def exists_alias(self, name, index=None, params=None, headers=None):
"""
Returns information about whether a particular alias exists.
:arg name: A comma-separated list of alias names to return
:arg index: A comma-separated list of index names to filter
aliases
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: all
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return await self.transport.perform_request(
"HEAD", _make_path(index, "_alias", name), params=params, headers=headers
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
async def get_alias(self, index=None, name=None, params=None, headers=None):
"""
Returns an alias.
:arg index: A comma-separated list of index names to filter
aliases
:arg name: A comma-separated list of alias names to return
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: all
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
return await self.transport.perform_request(
"GET", _make_path(index, "_alias", name), params=params, headers=headers
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
async def update_aliases(self, body, params=None, headers=None):
"""
Updates index aliases.
:arg body: The definition of `actions` to perform
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Request timeout
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return await self.transport.perform_request(
"POST", "/_aliases", params=params, headers=headers, body=body
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
async def delete_alias(self, index, name, params=None, headers=None):
"""
Deletes an alias.
:arg index: A comma-separated list of index names (supports
wildcards); use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified indices.
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit timestamp for the document
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return await self.transport.perform_request(
"DELETE", _make_path(index, "_alias", name), params=params, headers=headers
)
@query_params("create", "master_timeout", "cluster_manager_timeout", "order")
async def put_template(self, name, body, params=None, headers=None):
"""
Creates or updates an index template.
:arg name: The name of the template
:arg body: The template definition
:arg create: Whether the index template should only be added if
new or can also replace an existing one
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg order: The order for this template when merging multiple
matching ones (higher numbers are merged later, overriding the lower
numbers)
"""
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return await self.transport.perform_request(
"PUT",
_make_path("_template", name),
params=params,
headers=headers,
body=body,
)
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
async def exists_template(self, name, params=None, headers=None):
"""
Returns information about whether a particular index template exists.
:arg name: The comma separated names of the index templates
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
to master node
:arg cluster_manager_timeout: Explicit operation timeout for connection
to cluster_manager node
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return await self.transport.perform_request(
"HEAD", _make_path("_template", name), params=params, headers=headers
)
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
async def get_template(self, name=None, params=None, headers=None):
"""
Returns an index template.
:arg name: The comma separated names of the index templates
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
to master node
:arg cluster_manager_timeout: Explicit operation timeout for connection
to cluster_manager node
"""
return await self.transport.perform_request(
"GET", _make_path("_template", name), params=params, headers=headers
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
async def delete_template(self, name, params=None, headers=None):
"""
Deletes an index template.
:arg name: The name of the template
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return await self.transport.perform_request(
"DELETE", _make_path("_template", name), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"include_defaults",
"local",
"master_timeout",
"cluster_manager_timeout",
)
async def get_settings(self, index=None, name=None, params=None, headers=None):
"""
Returns settings for one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg name: The name of the settings that should be included
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: all
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg include_defaults: Whether to return all default setting for
each of the indices.
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
return await self.transport.perform_request(
"GET", _make_path(index, "_settings", name), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"preserve_existing",
"timeout",
)
async def put_settings(self, body, index=None, params=None, headers=None):
"""
Updates the index settings.
:arg body: The index settings to be updated
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg preserve_existing: Whether to update existing settings. If
set to `true` existing settings on an index remain unchanged, the
default is `false`
:arg timeout: Explicit operation timeout
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return await self.transport.perform_request(
"PUT",
_make_path(index, "_settings"),
params=params,
headers=headers,
body=body,
)
@query_params(
    "completion_fields", "expand_wildcards", "fielddata_fields", "fields",
    "forbid_closed_indices", "groups", "include_segment_file_sizes",
    "include_unloaded_segments", "level", "types",
)
async def stats(self, index=None, metric=None, params=None, headers=None):
    """
    Provides statistics on operations happening in an index.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg metric: Limit the information returned the specific
        metrics. Valid choices: _all, completion, docs, fielddata, query_cache,
        flush, get, indexing, merge, request_cache, refresh, search, segments,
        store, warmer, suggest
    :arg completion_fields: A comma-separated list of fields for
        `fielddata` and `suggest` index metric (supports wildcards)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg fielddata_fields: A comma-separated list of fields for
        `fielddata` index metric (supports wildcards)
    :arg fields: A comma-separated list of fields for `fielddata`
        and `completion` index metric (supports wildcards)
    :arg forbid_closed_indices: If set to false stats will also
        collected from closed indices if explicitly specified or if
        expand_wildcards expands to closed indices Default: True
    :arg groups: A comma-separated list of search groups for
        `search` index metric
    :arg include_segment_file_sizes: Whether to report the
        aggregated disk usage of each one of the Lucene index files (only
        applies if segment stats are requested)
    :arg include_unloaded_segments: If set to true segment stats
        will include stats for segments that are not currently loaded into
        memory
    :arg level: Return stats aggregated at cluster, index or shard
        level Valid choices: cluster, indices, shards Default: indices
    :arg types: A comma-separated list of document types for the
        `indexing` index metric
    """
    path = _make_path(index, "_stats", metric)
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose")
async def segments(self, index=None, params=None, headers=None):
    """
    Provides low-level information about segments in a Lucene index.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg verbose: Includes detailed memory usage by Lucene.
    """
    path = _make_path(index, "_segments")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params(
    "all_shards", "allow_no_indices", "analyze_wildcard", "analyzer",
    "default_operator", "df", "expand_wildcards", "explain",
    "ignore_unavailable", "lenient", "q", "rewrite",
)
async def validate_query(self, body=None, index=None, params=None, headers=None):
    """
    Allows a user to validate a potentially expensive query without executing it.

    :arg body: The query definition specified with the Query DSL
    :arg index: A comma-separated list of index names to restrict
        the operation; use `_all` or empty string to perform the operation on
        all indices
    :arg all_shards: Execute validation on all shards instead of one
        random shard per index
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg analyze_wildcard: Specify whether wildcard and prefix
        queries should be analyzed (default: false)
    :arg analyzer: The analyzer to use for the query string
    :arg default_operator: The default operator for query string
        query (AND or OR) Valid choices: AND, OR Default: OR
    :arg df: The field to use as default where no field prefix is
        given in the query string
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg explain: Return detailed information about the error
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg lenient: Specify whether format-based query failures (such
        as providing text to a numeric field) should be ignored
    :arg q: Query in the Lucene query string syntax
    :arg rewrite: Provide a more detailed explanation showing the
        actual Lucene query that will be executed.
    """
    path = _make_path(index, "_validate", "query")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers, body=body
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "fielddata", "fields",
    "ignore_unavailable", "query", "request",
)
async def clear_cache(self, index=None, params=None, headers=None):
    """
    Clears all or specific caches for one or more indices.

    :arg index: A comma-separated list of index name to limit the
        operation
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg fielddata: Clear field data
    :arg fields: A comma-separated list of fields to clear when
        using the `fielddata` parameter (default: all)
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg query: Clear query caches
    :arg request: Clear request cache
    """
    path = _make_path(index, "_cache", "clear")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params("active_only", "detailed")
async def recovery(self, index=None, params=None, headers=None):
    """
    Returns information about ongoing index shard recoveries.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg active_only: Display only those recoveries that are
        currently on-going
    :arg detailed: Whether to display detailed information about
        shard recovery
    """
    path = _make_path(index, "_recovery")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "ignore_unavailable",
    "only_ancient_segments", "wait_for_completion",
)
async def upgrade(self, index=None, params=None, headers=None):
    """
    DEPRECATED Upgrades to the current version of Lucene.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg only_ancient_segments: If true, only ancient (an older
        Lucene major release) segments will be upgraded
    :arg wait_for_completion: Specify whether the request should
        block until the all segments are upgraded (default: false)
    """
    path = _make_path(index, "_upgrade")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
async def get_upgrade(self, index=None, params=None, headers=None):
    """
    DEPRECATED Returns a progress status of current upgrade.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    """
    path = _make_path(index, "_upgrade")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "status")
async def shard_stores(self, index=None, params=None, headers=None):
    """
    Provides store information for shard copies of indices.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg status: A comma-separated list of statuses used to filter
        on shards to get store information for Valid choices: green, yellow,
        red, all
    """
    path = _make_path(index, "_shard_stores")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "flush", "ignore_unavailable",
    "max_num_segments", "only_expunge_deletes",
)
async def forcemerge(self, index=None, params=None, headers=None):
    """
    Performs the force merge operation on one or more indices.

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg flush: Specify whether the index should be flushed after
        performing the operation (default: true)
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg max_num_segments: The number of segments the index should
        be merged into (default: dynamic)
    :arg only_expunge_deletes: Specify whether the operation should
        only expunge deleted documents
    """
    path = _make_path(index, "_forcemerge")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params(
    "copy_settings", "master_timeout", "cluster_manager_timeout",
    "timeout", "wait_for_active_shards",
)
async def shrink(self, index, target, body=None, params=None, headers=None):
    """
    Allow to shrink an existing index into a new index with fewer primary shards.

    :arg index: The name of the source index to shrink
    :arg target: The name of the target index to shrink into
    :arg body: The configuration for the target index (`settings`
        and `aliases`)
    :arg copy_settings: whether or not to copy settings from the
        source index (defaults to false)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Set the number of active shards to
        wait for on the shrunken index before the operation returns.
    """
    # Both path components are mandatory.
    if index in SKIP_IN_PATH or target in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path(index, "_shrink", target)
    return await self.transport.perform_request(
        "PUT", path, params=params, headers=headers, body=body
    )
@query_params(
    "copy_settings", "master_timeout", "cluster_manager_timeout",
    "timeout", "wait_for_active_shards",
)
async def split(self, index, target, body=None, params=None, headers=None):
    """
    Allows you to split an existing index into a new index with more primary
    shards.

    :arg index: The name of the source index to split
    :arg target: The name of the target index to split into
    :arg body: The configuration for the target index (`settings`
        and `aliases`)
    :arg copy_settings: whether or not to copy settings from the
        source index (defaults to false)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Set the number of active shards to
        wait for on the target index before the operation returns.
    """
    # Both path components are mandatory.
    if index in SKIP_IN_PATH or target in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path(index, "_split", target)
    return await self.transport.perform_request(
        "PUT", path, params=params, headers=headers, body=body
    )
@query_params(
    "dry_run", "master_timeout", "cluster_manager_timeout",
    "timeout", "wait_for_active_shards",
)
async def rollover(self, alias, body=None, new_index=None, params=None, headers=None):
    """
    Updates an alias to point to a new index when the existing index is considered
    to be too large or too old.

    :arg alias: The name of the alias to rollover
    :arg body: The conditions that needs to be met for executing
        rollover
    :arg new_index: The name of the rollover index
    :arg dry_run: If set to true the rollover action will only be
        validated but not actually performed even if a condition matches. The
        default is false
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Set the number of active shards to
        wait for on the newly created rollover index before the operation
        returns.
    """
    if alias in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'alias'.")
    path = _make_path(alias, "_rollover", new_index)
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers, body=body
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "ignore_unavailable",
    "master_timeout", "cluster_manager_timeout", "timeout",
    "wait_for_active_shards",
)
async def freeze(self, index, params=None, headers=None):
    """
    Freezes an index. A frozen index has almost no overhead on the cluster (except
    for maintaining its metadata in memory) and is read-only.

    :arg index: The name of the index to freeze
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: closed
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Sets the number of active shards to
        wait for before the operation returns.
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, "_freeze")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "ignore_unavailable",
    "master_timeout", "cluster_manager_timeout", "timeout",
    "wait_for_active_shards",
)
async def unfreeze(self, index, params=None, headers=None):
    """
    Unfreezes an index. When a frozen index is unfrozen, the index goes through the
    normal recovery process and becomes writeable again.

    :arg index: The name of the index to unfreeze
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: closed
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    :arg wait_for_active_shards: Sets the number of active shards to
        wait for before the operation returns.
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, "_unfreeze")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
async def reload_search_analyzers(self, index, params=None, headers=None):
    """
    Reloads an index's search analyzers and their resources.

    :arg index: A comma-separated list of index names to reload
        analyzers for
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, "_reload_search_analyzers")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params()
async def create_data_stream(self, name, params=None, headers=None):
    """
    Creates a data stream

    :arg name: The name of the data stream
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_data_stream", name)
    return await self.transport.perform_request(
        "PUT", path, params=params, headers=headers
    )
@query_params("expand_wildcards")
async def delete_data_stream(self, name, params=None, headers=None):
    """
    Deletes a data stream.

    :arg name: A comma-separated list of data streams to delete; use
        `*` to delete all data streams
    :arg expand_wildcards: Whether wildcard expressions should get
        expanded to open or closed indices (default: open) Valid choices: open,
        closed, hidden, none, all Default: open
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_data_stream", name)
    return await self.transport.perform_request(
        "DELETE", path, params=params, headers=headers
    )
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
async def delete_index_template(self, name, params=None, headers=None):
    """
    Deletes an index template.

    :arg name: The name of the template
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_index_template", name)
    return await self.transport.perform_request(
        "DELETE", path, params=params, headers=headers
    )
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
async def exists_index_template(self, name, params=None, headers=None):
    """
    Returns information about whether a particular index template exists.

    :arg name: The name of the template
    :arg flat_settings: Return settings in flat format (default:
        false)
    :arg local: Return local information, do not retrieve the state
        from cluster_manager node (default: false)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
        to master node
    :arg cluster_manager_timeout: Explicit operation timeout for connection
        to cluster_manager node
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_index_template", name)
    return await self.transport.perform_request(
        "HEAD", path, params=params, headers=headers
    )
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
async def get_index_template(self, name=None, params=None, headers=None):
    """
    Returns an index template.

    :arg name: The comma separated names of the index templates
    :arg flat_settings: Return settings in flat format (default:
        false)
    :arg local: Return local information, do not retrieve the state
        from cluster_manager node (default: false)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
        to master node
    :arg cluster_manager_timeout: Explicit operation timeout for connection
        to cluster_manager node
    """
    path = _make_path("_index_template", name)
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params("cause", "create", "master_timeout", "cluster_manager_timeout")
async def put_index_template(self, name, body, params=None, headers=None):
    """
    Creates or updates an index template.

    :arg name: The name of the template
    :arg body: The template definition
    :arg cause: User defined reason for creating/updating the index
        template
    :arg create: Whether the index template should only be added if
        new or can also replace an existing one
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    """
    # Both the template name and its definition are mandatory.
    if name in SKIP_IN_PATH or body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path("_index_template", name)
    return await self.transport.perform_request(
        "PUT", path, params=params, headers=headers, body=body
    )
@query_params("cause", "create", "master_timeout", "cluster_manager_timeout")
async def simulate_index_template(self, name, body=None, params=None, headers=None):
    """
    Simulate matching the given index name against the index templates in the
    system

    :arg name: The name of the index (it must be a concrete index
        name)
    :arg body: New index template definition, which will be included
        in the simulation, as if it already exists in the system
    :arg cause: User defined reason for dry-run creating the new
        template for simulation purposes
    :arg create: Whether the index template we optionally defined in
        the body should only be dry-run added if new or can also replace an
        existing one
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_index_template", "_simulate_index", name)
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers, body=body
    )
@query_params("expand_wildcards")
async def get_data_stream(self, name=None, params=None, headers=None):
    """
    Returns data streams.

    :arg name: A comma-separated list of data streams to get; use
        `*` to get all data streams
    :arg expand_wildcards: Whether wildcard expressions should get
        expanded to open or closed indices (default: open) Valid choices: open,
        closed, hidden, none, all Default: open
    """
    path = _make_path("_data_stream", name)
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params("cause", "create", "master_timeout", "cluster_manager_timeout")
async def simulate_template(self, body=None, name=None, params=None, headers=None):
    """
    Simulate resolving the given template name or body

    :arg body: New index template definition to be simulated, if no
        index template name is specified
    :arg name: The name of the index template
    :arg cause: User defined reason for dry-run creating the new
        template for simulation purposes
    :arg create: Whether the index template we optionally defined in
        the body should only be dry-run added if new or can also replace an
        existing one
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    """
    path = _make_path("_index_template", "_simulate", name)
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers, body=body
    )
@query_params("expand_wildcards")
async def resolve_index(self, name, params=None, headers=None):
    """
    Returns information about any matching indices, aliases, and data streams

    .. warning::
        This API is **experimental** so may include breaking changes
        or be removed in a future version

    :arg name: A comma-separated list of names or wildcard
        expressions
    :arg expand_wildcards: Whether wildcard expressions should get
        expanded to open or closed indices (default: open) Valid choices: open,
        closed, hidden, none, all Default: open
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_resolve", "index", name)
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "ignore_unavailable",
    "master_timeout", "cluster_manager_timeout", "timeout",
)
async def add_block(self, index, block, params=None, headers=None):
    """
    Adds a block to an index.

    :arg index: A comma separated list of indices to add a block to
    :arg block: The block to add (one of read, write, read_only or
        metadata)
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
    :arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
    :arg timeout: Explicit operation timeout
    """
    # Both the index list and the block type are mandatory.
    if index in SKIP_IN_PATH or block in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path(index, "_block", block)
    return await self.transport.perform_request(
        "PUT", path, params=params, headers=headers
    )
@query_params()
async def data_streams_stats(self, name=None, params=None, headers=None):
    """
    Provides statistics on operations happening in a data stream.

    :arg name: A comma-separated list of data stream names; use
        `_all` or empty string to perform the operation on all data streams
    """
    path = _make_path("_data_stream", name, "_stats")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
@query_params()
async def promote_data_stream(self, name, params=None, headers=None):
    """
    Promotes a data stream from a replicated data stream managed by CCR to a
    regular data stream

    :arg name: The name of the data stream
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_data_stream", "_promote", name)
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params()
async def migrate_to_data_stream(self, name, params=None, headers=None):
    """
    Migrates an alias to a data stream

    :arg name: The name of the alias to migrate
    """
    if name in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'name'.")
    path = _make_path("_data_stream", "_migrate", name)
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params(
    "allow_no_indices", "expand_wildcards", "flush", "ignore_unavailable",
    "run_expensive_tasks",
)
async def disk_usage(self, index, params=None, headers=None):
    """
    Analyzes the disk usage of each field of an index or data stream

    .. warning::
        This API is **experimental** so may include breaking changes
        or be removed in a future version

    :arg index: Comma-separated list of indices or data streams to
        analyze the disk usage
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg flush: Whether flush or not before analyzing the index disk
        usage. Defaults to true
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    :arg run_expensive_tasks: Must be set to [true] in order for the
        task to be performed. Defaults to false.
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, "_disk_usage")
    return await self.transport.perform_request(
        "POST", path, params=params, headers=headers
    )
@query_params("allow_no_indices", "expand_wildcards", "fields", "ignore_unavailable")
async def field_usage_stats(self, index, params=None, headers=None):
    """
    Returns the field usage stats for each field of an index

    .. warning::
        This API is **experimental** so may include breaking changes
        or be removed in a future version

    :arg index: A comma-separated list of index names; use `_all` or
        empty string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both. Valid choices: open,
        closed, hidden, none, all Default: open
    :arg fields: A comma-separated list of fields to include in the
        stats if only a subset of fields should be returned (supports wildcards)
    :arg ignore_unavailable: Whether specified concrete indices
        should be ignored when unavailable (missing or closed)
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    path = _make_path(index, "_field_usage_stats")
    return await self.transport.perform_request(
        "GET", path, params=params, headers=headers
    )
import warnings
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class TasksClient(NamespacedClient):
    """Client namespace for the `/_tasks` task-management APIs."""

    @query_params(
        "actions", "detailed", "group_by", "nodes", "parent_task_id",
        "timeout", "wait_for_completion",
    )
    async def list(self, params=None, headers=None):
        """
        Returns a list of tasks.

        .. warning::
            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg actions: A comma-separated list of actions that should be
            returned. Leave empty to return all.
        :arg detailed: Return detailed task information (default: false)
        :arg group_by: Group tasks by nodes or parent/child
            relationships Valid choices: nodes, parents, none Default: nodes
        :arg nodes: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from the
            node you're connecting to, leave empty to get information from all nodes
        :arg parent_task_id: Return tasks with specified parent task id
            (node_id:task_number). Set to -1 to return all.
        :arg timeout: Explicit operation timeout
        :arg wait_for_completion: Wait for the matching tasks to
            complete (default: false)
        """
        return await self.transport.perform_request(
            "GET", "/_tasks", params=params, headers=headers
        )

    @query_params("actions", "nodes", "parent_task_id", "wait_for_completion")
    async def cancel(self, task_id=None, params=None, headers=None):
        """
        Cancels a task, if it can be cancelled through an API.

        .. warning::
            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg task_id: Cancel the task with specified task id
            (node_id:task_number)
        :arg actions: A comma-separated list of actions that should be
            cancelled. Leave empty to cancel all.
        :arg nodes: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from the
            node you're connecting to, leave empty to get information from all nodes
        :arg parent_task_id: Cancel tasks with specified parent task id
            (node_id:task_number). Set to -1 to cancel all.
        :arg wait_for_completion: Should the request block until the
            cancellation of the task and its descendant tasks is completed. Defaults
            to false
        """
        path = _make_path("_tasks", task_id, "_cancel")
        return await self.transport.perform_request(
            "POST", path, params=params, headers=headers
        )

    @query_params("timeout", "wait_for_completion")
    async def get(self, task_id=None, params=None, headers=None):
        """
        Returns information about a task.

        .. warning::
            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg task_id: Return the task with specified id
            (node_id:task_number)
        :arg timeout: Explicit operation timeout
        :arg wait_for_completion: Wait for the matching tasks to
            complete (default: false)
        """
        # Calling without a task_id still works but is deprecated; warn and
        # fall through to the request so existing callers keep functioning.
        if task_id in SKIP_IN_PATH:
            warnings.warn(
                "Calling client.tasks.get() without a task_id is deprecated "
                "and will be removed in v8.0. Use client.tasks.list() instead.",
                category=DeprecationWarning,
                stacklevel=3,
            )
        path = _make_path("_tasks", task_id)
        return await self.transport.perform_request(
            "GET", path, params=params, headers=headers
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class SnapshotClient(NamespacedClient):
    """Namespaced client wrapping the snapshot and repository APIs (``/_snapshot``)."""

    @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion")
    def create(self, repository, snapshot, body=None, params=None, headers=None):
        """
        Take a new snapshot of the cluster into the given repository.

        :arg repository: A repository name
        :arg snapshot: A snapshot name
        :arg body: The snapshot definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg wait_for_completion: Block until the operation has completed
            before returning
        """
        if any(arg in SKIP_IN_PATH for arg in (repository, snapshot)):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_snapshot", repository, snapshot)
        return self.transport.perform_request(
            "PUT", endpoint, params=params, headers=headers, body=body
        )

    @query_params("master_timeout", "cluster_manager_timeout")
    def delete(self, repository, snapshot, params=None, headers=None):
        """
        Remove a snapshot from a repository.

        :arg repository: A repository name
        :arg snapshot: A snapshot name
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        if any(arg in SKIP_IN_PATH for arg in (repository, snapshot)):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_snapshot", repository, snapshot)
        return self.transport.perform_request(
            "DELETE", endpoint, params=params, headers=headers
        )

    @query_params(
        "ignore_unavailable",
        "include_repository",
        "index_details",
        "master_timeout",
        "cluster_manager_timeout",
        "verbose",
    )
    def get(self, repository, snapshot, params=None, headers=None):
        """
        Retrieve information about one or more snapshots.

        :arg repository: A repository name
        :arg snapshot: A comma-separated list of snapshot names
        :arg ignore_unavailable: Ignore unavailable snapshots instead of
            raising a SnapshotMissingException (default: false)
        :arg include_repository: Include the repository name in the
            snapshot info. Defaults to true.
        :arg index_details: Include per-index details in the snapshot,
            when available. Defaults to false.
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg verbose: Show verbose snapshot info rather than only the basic
            info found in the repository index blob
        """
        if any(arg in SKIP_IN_PATH for arg in (repository, snapshot)):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_snapshot", repository, snapshot)
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    def delete_repository(self, repository, params=None, headers=None):
        """
        Unregister a snapshot repository.

        :arg repository: Name of the snapshot repository to unregister.
            Wildcard (`*`) patterns are supported.
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        endpoint = _make_path("_snapshot", repository)
        return self.transport.perform_request(
            "DELETE", endpoint, params=params, headers=headers
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    def get_repository(self, repository=None, params=None, headers=None):
        """
        Retrieve information about one or more repositories.

        :arg repository: A comma-separated list of repository names
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        endpoint = _make_path("_snapshot", repository)
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout", "verify")
    def create_repository(self, repository, body, params=None, headers=None):
        """
        Register a snapshot repository.

        :arg repository: A repository name
        :arg body: The repository definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        :arg verify: Whether to verify the repository after creation
        """
        if any(arg in SKIP_IN_PATH for arg in (repository, body)):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_snapshot", repository)
        return self.transport.perform_request(
            "PUT", endpoint, params=params, headers=headers, body=body
        )

    @query_params("master_timeout", "cluster_manager_timeout", "wait_for_completion")
    def restore(self, repository, snapshot, body=None, params=None, headers=None):
        """
        Restore a snapshot.

        :arg repository: A repository name
        :arg snapshot: A snapshot name
        :arg body: Details of what to restore
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg wait_for_completion: Block until the operation has completed
            before returning
        """
        if any(arg in SKIP_IN_PATH for arg in (repository, snapshot)):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_snapshot", repository, snapshot, "_restore")
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers, body=body
        )

    @query_params("ignore_unavailable", "master_timeout", "cluster_manager_timeout")
    def status(self, repository=None, snapshot=None, params=None, headers=None):
        """
        Report the status of currently running or completed snapshots.

        :arg repository: A repository name
        :arg snapshot: A comma-separated list of snapshot names
        :arg ignore_unavailable: Ignore unavailable snapshots instead of
            raising a SnapshotMissingException (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        endpoint = _make_path("_snapshot", repository, snapshot, "_status")
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    def verify_repository(self, repository, params=None, headers=None):
        """
        Verify that a repository is functional on all cluster nodes.

        :arg repository: A repository name
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        endpoint = _make_path("_snapshot", repository, "_verify")
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    def cleanup_repository(self, repository, params=None, headers=None):
        """
        Remove stale data from a repository.

        :arg repository: A repository name
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        endpoint = _make_path("_snapshot", repository, "_cleanup")
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers
        )

    @query_params("master_timeout", "cluster_manager_timeout")
    def clone(
        self, repository, snapshot, target_snapshot, body, params=None, headers=None
    ):
        """
        Clone indices from one snapshot into another snapshot of the same
        repository.

        :arg repository: A repository name
        :arg snapshot: The name of the snapshot to clone from
        :arg target_snapshot: The name of the cloned snapshot to create
        :arg body: The snapshot clone definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        required = (repository, snapshot, target_snapshot, body)
        if any(arg in SKIP_IN_PATH for arg in required):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_snapshot", repository, snapshot, "_clone", target_snapshot)
        return self.transport.perform_request(
            "PUT", endpoint, params=params, headers=headers, body=body
        )

    @query_params(
        "blob_count",
        "concurrency",
        "detailed",
        "early_read_node_count",
        "max_blob_size",
        "max_total_data_size",
        "rare_action_probability",
        "rarely_abort_writes",
        "read_node_count",
        "seed",
        "timeout",
    )
    def repository_analyze(self, repository, params=None, headers=None):
        """
        Analyze a repository for correctness and performance.

        :arg repository: A repository name
        :arg blob_count: Number of blobs to create during the test.
            Defaults to 100.
        :arg concurrency: Number of operations to run concurrently during
            the test. Defaults to 10.
        :arg detailed: Return detailed results instead of a summary.
            Defaults to 'false' so that only the summary is returned.
        :arg early_read_node_count: Number of nodes on which to perform an
            early read on a blob, i.e. before writing has completed. Early
            reads are rare actions so the 'rare_action_probability' parameter
            is also relevant. Defaults to 2.
        :arg max_blob_size: Maximum size of a blob to create during the
            test, e.g '1gb' or '100mb'. Defaults to '10mb'.
        :arg max_total_data_size: Maximum total size of all blobs to create
            during the test, e.g '1tb' or '100gb'. Defaults to '1gb'.
        :arg rare_action_probability: Probability of taking a rare action
            such as an early read or an overwrite. Defaults to 0.02.
        :arg rarely_abort_writes: Whether to rarely abort writes before
            they complete. Defaults to 'true'.
        :arg read_node_count: Number of nodes on which to read a blob after
            writing. Defaults to 10.
        :arg seed: Seed for the random number generator used to create the
            test workload. Defaults to a random value.
        :arg timeout: Explicit operation timeout. Defaults to '30s'.
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'repository'.")
        endpoint = _make_path("_snapshot", repository, "_analyze")
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class DanglingIndicesClient(NamespacedClient):
    """Namespaced client for the dangling-index recovery APIs (``/_dangling``)."""

    @query_params(
        "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout"
    )
    def delete_dangling_index(self, index_uuid, params=None, headers=None):
        """
        Permanently remove the dangling index with the given UUID.

        :arg index_uuid: The UUID of the dangling index
        :arg accept_data_loss: Must be set to true in order to delete the
            dangling index
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify
            timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to
            cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if index_uuid in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index_uuid'.")
        endpoint = _make_path("_dangling", index_uuid)
        return self.transport.perform_request(
            "DELETE", endpoint, params=params, headers=headers
        )

    @query_params(
        "accept_data_loss", "master_timeout", "cluster_manager_timeout", "timeout"
    )
    def import_dangling_index(self, index_uuid, params=None, headers=None):
        """
        Import the dangling index with the given UUID back into the cluster.

        :arg index_uuid: The UUID of the dangling index
        :arg accept_data_loss: Must be set to true in order to import the
            dangling index
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify
            timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to
            cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if index_uuid in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index_uuid'.")
        endpoint = _make_path("_dangling", index_uuid)
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers
        )

    @query_params()
    def list_dangling_indices(self, params=None, headers=None):
        """
        Return all dangling indices known to the cluster.
        """
        endpoint = "/_dangling"
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class ClusterClient(NamespacedClient):
    """Namespaced client for the cluster-level APIs (``/_cluster`` and friends)."""

    @query_params(
        "expand_wildcards",
        "level",
        "local",
        "master_timeout",
        "cluster_manager_timeout",
        "timeout",
        "wait_for_active_shards",
        "wait_for_events",
        "wait_for_no_initializing_shards",
        "wait_for_no_relocating_shards",
        "wait_for_nodes",
        "wait_for_status",
    )
    def health(self, index=None, params=None, headers=None):
        """
        Return basic information about the health of the cluster.

        :arg index: Limit the information returned to a specific index
        :arg expand_wildcards: Whether to expand wildcard expression to
            concrete indices that are open, closed or both. Valid choices:
            open, closed, hidden, none, all Default: all
        :arg level: Specify the level of detail for returned information
            Valid choices: cluster, indices, shards Default: cluster
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        :arg wait_for_active_shards: Wait until the specified number of
            shards is active
        :arg wait_for_events: Wait until all currently queued events with
            the given priority are processed Valid choices: immediate, urgent,
            high, normal, low, languid
        :arg wait_for_no_initializing_shards: Whether to wait until there
            are no initializing shards in the cluster
        :arg wait_for_no_relocating_shards: Whether to wait until there are
            no relocating shards in the cluster
        :arg wait_for_nodes: Wait until the specified number of nodes is
            available
        :arg wait_for_status: Wait until cluster is in a specific state
            Valid choices: green, yellow, red
        """
        endpoint = _make_path("_cluster", "health", index)
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    def pending_tasks(self, params=None, headers=None):
        """
        Return cluster-level changes (e.g. create index, update mapping,
        allocate or fail shard) which have not yet been executed.

        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify
            timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to
            cluster_manager
        """
        endpoint = "/_cluster/pending_tasks"
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params(
        "allow_no_indices",
        "expand_wildcards",
        "flat_settings",
        "ignore_unavailable",
        "local",
        "master_timeout",
        "cluster_manager_timeout",
        "wait_for_metadata_version",
        "wait_for_timeout",
    )
    def state(self, metric=None, index=None, params=None, headers=None):
        """
        Return comprehensive information about the state of the cluster.

        :arg metric: Limit the information returned to the specified
            metrics Valid choices: _all, blocks, metadata, nodes,
            routing_table, routing_nodes, master_node, version
        :arg index: A comma-separated list of index names; use `_all` or
            empty string to perform the operation on all indices
        :arg allow_no_indices: Whether to ignore if a wildcard indices
            expression resolves into no concrete indices. (This includes
            `_all` string or when no indices have been specified)
        :arg expand_wildcards: Whether to expand wildcard expression to
            concrete indices that are open, closed or both. Valid choices:
            open, closed, hidden, none, all Default: open
        :arg flat_settings: Return settings in flat format (default: false)
        :arg ignore_unavailable: Whether specified concrete indices should
            be ignored when unavailable (missing or closed)
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify
            timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to
            cluster_manager
        :arg wait_for_metadata_version: Wait for the metadata version to be
            equal or greater than the specified metadata version
        :arg wait_for_timeout: The maximum time to wait for
            wait_for_metadata_version before timing out
        """
        if index and metric in SKIP_IN_PATH:
            # An index filter without an explicit metric still needs a metric
            # path segment, so fall back to the catch-all one.
            metric = "_all"
        endpoint = _make_path("_cluster", "state", metric, index)
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("flat_settings", "timeout")
    def stats(self, node_id=None, params=None, headers=None):
        """
        Return a high-level overview of cluster statistics.

        :arg node_id: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from
            all nodes
        :arg flat_settings: Return settings in flat format (default: false)
        :arg timeout: Explicit operation timeout
        """
        if node_id in SKIP_IN_PATH:
            endpoint = "/_cluster/stats"
        else:
            endpoint = _make_path("_cluster", "stats", "nodes", node_id)
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params(
        "dry_run",
        "explain",
        "master_timeout",
        "cluster_manager_timeout",
        "metric",
        "retry_failed",
        "timeout",
    )
    def reroute(self, body=None, params=None, headers=None):
        """
        Manually change the allocation of individual shards in the cluster.

        :arg body: The definition of `commands` to perform (`move`,
            `cancel`, `allocate`)
        :arg dry_run: Simulate the operation only and return the resulting
            state
        :arg explain: Return an explanation of why the commands can or
            cannot be executed
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg metric: Limit the information returned to the specified
            metrics. Defaults to all but metadata Valid choices: _all, blocks,
            metadata, nodes, routing_table, master_node, version
        :arg retry_failed: Retries allocation of shards that are blocked
            due to too many subsequent allocation failures
        :arg timeout: Explicit operation timeout
        """
        endpoint = "/_cluster/reroute"
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers, body=body
        )

    @query_params(
        "flat_settings",
        "include_defaults",
        "master_timeout",
        "cluster_manager_timeout",
        "timeout",
    )
    def get_settings(self, params=None, headers=None):
        """
        Return cluster settings.

        :arg flat_settings: Return settings in flat format (default: false)
        :arg include_defaults: Whether to return all default cluster
            settings.
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        endpoint = "/_cluster/settings"
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params(
        "flat_settings", "master_timeout", "cluster_manager_timeout", "timeout"
    )
    def put_settings(self, body, params=None, headers=None):
        """
        Update the cluster settings.

        :arg body: The settings to be updated. Can be either `transient` or
            `persistent` (survives cluster restart).
        :arg flat_settings: Return settings in flat format (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")
        endpoint = "/_cluster/settings"
        return self.transport.perform_request(
            "PUT", endpoint, params=params, headers=headers, body=body
        )

    @query_params()
    def remote_info(self, params=None, headers=None):
        """
        Return information about configured remote clusters.
        """
        endpoint = "/_remote/info"
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("include_disk_info", "include_yes_decisions")
    def allocation_explain(self, body=None, params=None, headers=None):
        """
        Explain shard allocation decisions in the cluster.

        :arg body: The index, shard, and primary flag to explain. Empty
            means 'explain the first unassigned shard'
        :arg include_disk_info: Return information about disk usage and
            shard sizes (default: false)
        :arg include_yes_decisions: Return 'YES' decisions in explanation
            (default: false)
        """
        endpoint = "/_cluster/allocation/explain"
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers, body=body
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    def delete_component_template(self, name, params=None, headers=None):
        """
        Delete a component template.

        :arg name: The name of the template
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify
            timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to
            cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if name in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'name'.")
        endpoint = _make_path("_component_template", name)
        return self.transport.perform_request(
            "DELETE", endpoint, params=params, headers=headers
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    def get_component_template(self, name=None, params=None, headers=None):
        """
        Return one or more component templates.

        :arg name: The comma separated names of the component templates
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        endpoint = _make_path("_component_template", name)
        return self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )

    @query_params("create", "master_timeout", "cluster_manager_timeout", "timeout")
    def put_component_template(self, name, body, params=None, headers=None):
        """
        Create or update a component template.

        :arg name: The name of the template
        :arg body: The template definition
        :arg create: Whether the index template should only be added if new
            or can also replace an existing one
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Specify
            timeout for connection to master
        :arg cluster_manager_timeout: Specify timeout for connection to
            cluster_manager
        :arg timeout: Explicit operation timeout
        """
        if any(arg in SKIP_IN_PATH for arg in (name, body)):
            raise ValueError("Empty value passed for a required argument.")
        endpoint = _make_path("_component_template", name)
        return self.transport.perform_request(
            "PUT", endpoint, params=params, headers=headers, body=body
        )

    @query_params("local", "master_timeout", "cluster_manager_timeout")
    def exists_component_template(self, name, params=None, headers=None):
        """
        Check whether a particular component template exists.

        :arg name: The name of the template
        :arg local: Return local information, do not retrieve the state
            from cluster_manager node (default: false)
        :arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit
            operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for connection
            to cluster_manager node
        """
        if name in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'name'.")
        endpoint = _make_path("_component_template", name)
        return self.transport.perform_request(
            "HEAD", endpoint, params=params, headers=headers
        )

    @query_params("wait_for_removal")
    def delete_voting_config_exclusions(self, params=None, headers=None):
        """
        Clear cluster voting config exclusions.

        :arg wait_for_removal: Specifies whether to wait for all excluded
            nodes to be removed from the cluster before clearing the voting
            configuration exclusions list. Default: True
        """
        endpoint = "/_cluster/voting_config_exclusions"
        return self.transport.perform_request(
            "DELETE", endpoint, params=params, headers=headers
        )

    @query_params("node_ids", "node_names", "timeout")
    def post_voting_config_exclusions(self, params=None, headers=None):
        """
        Update the cluster voting config exclusions by node ids or node names.

        :arg node_ids: A comma-separated list of the persistent ids of the
            nodes to exclude from the voting configuration. If specified, you
            may not also specify ?node_names.
        :arg node_names: A comma-separated list of the names of the nodes
            to exclude from the voting configuration. If specified, you may
            not also specify ?node_ids.
        :arg timeout: Explicit operation timeout Default: 30s
        """
        endpoint = "/_cluster/voting_config_exclusions"
        return self.transport.perform_request(
            "POST", endpoint, params=params, headers=headers
        )
from .utils import NamespacedClient, _make_path, query_params
class NodesClient(NamespacedClient):
    """Namespaced client exposing the node-level APIs (``/_nodes``)."""

    @query_params("timeout")
    def reload_secure_settings(
        self, body=None, node_id=None, params=None, headers=None
    ):
        """
        Reloads secure settings.

        :arg body: An object containing the password for the
            opensearch keystore
        :arg node_id: A comma-separated list of node IDs to span the
            reload/reinit call. Should stay empty because reloading usually
            involves all cluster nodes.
        :arg timeout: Explicit operation timeout
        """
        return self.transport.perform_request(
            "POST",
            _make_path("_nodes", node_id, "reload_secure_settings"),
            params=params,
            headers=headers,
            body=body,
        )

    @query_params("flat_settings", "timeout")
    def info(self, node_id=None, metric=None, params=None, headers=None):
        """
        Returns information about nodes in the cluster.

        :arg node_id: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from
            all nodes
        :arg metric: A comma-separated list of metrics you wish returned.
            Leave empty to return all. Valid choices: settings, os, process,
            jvm, thread_pool, transport, http, plugins, ingest
        :arg flat_settings: Return settings in flat format (default: false)
        :arg timeout: Explicit operation timeout
        """
        return self.transport.perform_request(
            "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers
        )

    @query_params(
        "completion_fields",
        "fielddata_fields",
        "fields",
        "groups",
        "include_segment_file_sizes",
        "include_unloaded_segments",
        "level",
        "timeout",
        "types",
    )
    def stats(
        self, node_id=None, metric=None, index_metric=None, params=None, headers=None
    ):
        """
        Returns statistical information about nodes in the cluster.

        :arg node_id: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from
            all nodes
        :arg metric: Limit the information returned to the specified
            metrics Valid choices: _all, breaker, fs, http, indices, jvm, os,
            process, thread_pool, transport, discovery, indexing_pressure
        :arg index_metric: Limit the information returned for `indices`
            metric to the specific index metrics. Isn't used if `indices` (or
            `all`) metric isn't specified. Valid choices: _all, completion,
            docs, fielddata, query_cache, flush, get, indexing, merge,
            request_cache, refresh, search, segments, store, warmer, suggest
        :arg completion_fields: A comma-separated list of fields for
            `fielddata` and `suggest` index metric (supports wildcards)
        :arg fielddata_fields: A comma-separated list of fields for
            `fielddata` index metric (supports wildcards)
        :arg fields: A comma-separated list of fields for `fielddata` and
            `completion` index metric (supports wildcards)
        :arg groups: A comma-separated list of search groups for `search`
            index metric
        :arg include_segment_file_sizes: Whether to report the aggregated
            disk usage of each one of the Lucene index files (only applies if
            segment stats are requested)
        :arg include_unloaded_segments: If set to true segment stats will
            include stats for segments that are not currently loaded into
            memory
        :arg level: Return indices stats aggregated at index, node or shard
            level Valid choices: indices, node, shards Default: node
        :arg timeout: Explicit operation timeout
        :arg types: A comma-separated list of document types for the
            `indexing` index metric
        """
        return self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "stats", metric, index_metric),
            params=params,
            headers=headers,
        )

    @query_params(
        "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout"
    )
    def hot_threads(self, node_id=None, params=None, headers=None):
        """
        Returns information about hot threads on each node in the cluster.

        :arg node_id: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from
            all nodes
        :arg doc_type: The type to sample (default: cpu) Valid choices:
            cpu, wait, block
        :arg ignore_idle_threads: Don't show threads that are in known-
            idle places, such as waiting on a socket select or pulling from an
            empty task queue (default: true)
        :arg interval: The interval for the second sampling of threads
        :arg snapshots: Number of samples of thread stacktrace (default:
            10)
        :arg threads: Specify the number of threads to provide information
            for (default: 3)
        :arg timeout: Explicit operation timeout
        """
        # `type` is a reserved word, so the public keyword argument is
        # `doc_type`; rename it back to the `type` query parameter expected by
        # the REST API. `params` is None when no query parameters were
        # supplied at all, so guard before the membership test (previously a
        # bare hot_threads() call raised TypeError on `"doc_type" in None`).
        if params and "doc_type" in params:
            params["type"] = params.pop("doc_type")
        return self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "hot_threads"),
            params=params,
            headers=headers,
        )

    @query_params("timeout")
    def usage(self, node_id=None, metric=None, params=None, headers=None):
        """
        Returns low-level information about REST actions usage on nodes.

        :arg node_id: A comma-separated list of node IDs or names to limit
            the returned information; use `_local` to return information from
            the node you're connecting to, leave empty to get information from
            all nodes
        :arg metric: Limit the information returned to the specified
            metrics Valid choices: _all, rest_actions
        :arg timeout: Explicit operation timeout
        """
        return self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "usage", metric),
            params=params,
            headers=headers,
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class IngestClient(NamespacedClient):
    @query_params("master_timeout", "cluster_manager_timeout", "summary")
    def get_pipeline(self, id=None, params=None, headers=None):
        """
        Returns a pipeline.

        :arg id: Comma separated list of pipeline ids; wildcards are
            supported
        :arg master_timeout (Deprecated: use cluster_manager_timeout):
            Explicit operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for
            connection to cluster_manager node
        :arg summary: Return pipelines without their definitions
            (default: false)
        """
        return self.transport.perform_request(
            "GET",
            _make_path("_ingest", "pipeline", id),
            headers=headers,
            params=params,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    def put_pipeline(self, id, body, params=None, headers=None):
        """
        Creates or updates a pipeline.

        :arg id: Pipeline ID
        :arg body: The ingest definition
        :arg master_timeout (Deprecated: use cluster_manager_timeout):
            Explicit operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for
            connection to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if any(arg in SKIP_IN_PATH for arg in (id, body)):
            raise ValueError("Empty value passed for a required argument.")

        return self.transport.perform_request(
            "PUT",
            _make_path("_ingest", "pipeline", id),
            body=body,
            params=params,
            headers=headers,
        )

    @query_params("master_timeout", "cluster_manager_timeout", "timeout")
    def delete_pipeline(self, id, params=None, headers=None):
        """
        Deletes a pipeline.

        :arg id: Pipeline ID
        :arg master_timeout (Deprecated: use cluster_manager_timeout):
            Explicit operation timeout for connection to master node
        :arg cluster_manager_timeout: Explicit operation timeout for
            connection to cluster_manager node
        :arg timeout: Explicit operation timeout
        """
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'id'.")

        return self.transport.perform_request(
            "DELETE",
            _make_path("_ingest", "pipeline", id),
            headers=headers,
            params=params,
        )

    @query_params("verbose")
    def simulate(self, body, id=None, params=None, headers=None):
        """
        Allows to simulate a pipeline with example documents.

        :arg body: The simulate definition
        :arg id: Pipeline ID
        :arg verbose: Verbose mode; display data output for each
            processor in the executed pipeline
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")

        path = _make_path("_ingest", "pipeline", id, "_simulate")
        return self.transport.perform_request(
            "POST", path, body=body, params=params, headers=headers
        )

    @query_params()
    def processor_grok(self, params=None, headers=None):
        """
        Returns a list of the built-in patterns.
        """
        return self.transport.perform_request(
            "GET", "/_ingest/processor/grok", headers=headers, params=params
        )

    @query_params()
    def geo_ip_stats(self, params=None, headers=None):
        """
        Returns statistical information about geoip databases
        """
        return self.transport.perform_request(
            "GET", "/_ingest/geoip/stats", headers=headers, params=params
        )
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class IndicesClient(NamespacedClient):
@query_params()
def analyze(self, body=None, index=None, params=None, headers=None):
"""
Performs the analysis process on a text and return the tokens breakdown of the
text.
:arg body: Define analyzer/tokenizer parameters and the text on
which the analysis should be performed
:arg index: The name of the index to scope the operation
"""
return self.transport.perform_request(
"POST",
_make_path(index, "_analyze"),
params=params,
headers=headers,
body=body,
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
def refresh(self, index=None, params=None, headers=None):
"""
Performs the refresh operation in one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
"""
return self.transport.perform_request(
"POST", _make_path(index, "_refresh"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"force",
"ignore_unavailable",
"wait_if_ongoing",
)
def flush(self, index=None, params=None, headers=None):
"""
Performs the flush operation on one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string for all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg force: Whether a flush should be forced even if it is not
necessarily needed ie. if no changes will be committed to the index.
This is useful if transaction log IDs should be incremented even if no
uncommitted changes are present. (This setting can be considered as
internal)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg wait_if_ongoing: If set to true the flush operation will
block until the flush can be executed if another flush operation is
already executing. The default is true. If set to false the flush will
be skipped iff if another flush operation is already running.
"""
return self.transport.perform_request(
"POST", _make_path(index, "_flush"), params=params, headers=headers
)
@query_params(
"master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards"
)
def create(self, index, body=None, params=None, headers=None):
"""
Creates an index with optional settings and mappings.
:arg index: The name of the index
:arg body: The configuration for the index (`settings` and
`mappings`)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"PUT", _make_path(index), params=params, headers=headers, body=body
)
@query_params(
"master_timeout", "cluster_manager_timeout", "timeout", "wait_for_active_shards"
)
def clone(self, index, target, body=None, params=None, headers=None):
"""
Clones an index
:arg index: The name of the source index to clone
:arg target: The name of the target index to clone into
:arg body: The configuration for the target index (`settings`
and `aliases`)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for on the cloned index before the operation returns.
"""
for param in (index, target):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path(index, "_clone", target),
params=params,
headers=headers,
body=body,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"include_defaults",
"local",
"master_timeout",
"cluster_manager_timeout",
)
def get(self, index, params=None, headers=None):
"""
Returns information about one or more indices.
:arg index: A comma-separated list of index names
:arg allow_no_indices: Ignore if a wildcard expression resolves
to no concrete indices (default: false)
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Ignore unavailable indexes (default:
false)
:arg include_defaults: Whether to return all default setting for
each of the indices.
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"GET", _make_path(index), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def open(self, index, params=None, headers=None):
"""
Opens an index.
:arg index: A comma separated list of indices to open
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: closed
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of active shards to
wait for before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST", _make_path(index, "_open"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def close(self, index, params=None, headers=None):
"""
Closes an index.
:arg index: A comma separated list of indices to close
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of active shards to
wait for before the operation returns. Set to `index-setting` to wait
according to the index setting `index.write.wait_for_active_shards`, or
`all` to wait for all shards, or an integer. Defaults to `0`.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST", _make_path(index, "_close"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
)
def delete(self, index, params=None, headers=None):
"""
Deletes an index.
:arg index: A comma-separated list of indices to delete; use
`_all` or `*` string to delete all indices
:arg allow_no_indices: Ignore if a wildcard expression resolves
to no concrete indices (default: false)
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Ignore unavailable indexes (default:
false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"DELETE", _make_path(index), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"include_defaults",
"local",
)
def exists(self, index, params=None, headers=None):
"""
Returns information about whether a particular index exists.
:arg index: A comma-separated list of index names
:arg allow_no_indices: Ignore if a wildcard expression resolves
to no concrete indices (default: false)
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Ignore unavailable indexes (default:
false)
:arg include_defaults: Whether to return all default setting for
each of the indices.
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"HEAD", _make_path(index), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"write_index_only",
)
def put_mapping(self, body, index=None, params=None, headers=None):
"""
Updates the index mappings.
:arg body: The mapping definition
:arg index: A comma-separated list of index names the mapping
should be added to (supports wildcards); use `_all` or omit to add the
mapping on all indices.
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg write_index_only: When true, applies mappings only to the
write index of an alias or data stream
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
if index in SKIP_IN_PATH:
index = "_all"
return self.transport.perform_request(
"PUT",
_make_path(index, "_mapping"),
params=params,
headers=headers,
body=body,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"local",
"master_timeout",
"cluster_manager_timeout",
)
def get_mapping(self, index=None, params=None, headers=None):
"""
Returns mappings for one or more indices.
:arg index: A comma-separated list of index names
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
return self.transport.perform_request(
"GET",
_make_path(index, "_mapping"),
params=params,
headers=headers,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"include_defaults",
"local",
)
def get_field_mapping(self, fields, index=None, params=None, headers=None):
"""
Returns mapping for one or more fields.
:arg fields: A comma-separated list of fields
:arg index: A comma-separated list of index names
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg include_defaults: Whether the default mapping values should
be returned as well
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
if fields in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'fields'.")
return self.transport.perform_request(
"GET",
_make_path(index, "_mapping", "field", fields),
params=params,
headers=headers,
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
def put_alias(self, index, name, body=None, params=None, headers=None):
"""
Creates or updates an alias.
:arg index: A comma-separated list of index names the alias
should point to (supports wildcards); use `_all` to perform the
operation on all indices.
:arg name: The name of the alias to be created or updated
:arg body: The settings for the alias, such as `routing` or
`filter`
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit timestamp for the document
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path(index, "_alias", name),
params=params,
headers=headers,
body=body,
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
def exists_alias(self, name, index=None, params=None, headers=None):
"""
Returns information about whether a particular alias exists.
:arg name: A comma-separated list of alias names to return
:arg index: A comma-separated list of index names to filter
aliases
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: all
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"HEAD", _make_path(index, "_alias", name), params=params, headers=headers
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
def get_alias(self, index=None, name=None, params=None, headers=None):
"""
Returns an alias.
:arg index: A comma-separated list of index names to filter
aliases
:arg name: A comma-separated list of alias names to return
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: all
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
"""
return self.transport.perform_request(
"GET", _make_path(index, "_alias", name), params=params, headers=headers
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
def update_aliases(self, body, params=None, headers=None):
"""
Updates index aliases.
:arg body: The definition of `actions` to perform
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Request timeout
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_aliases", params=params, headers=headers, body=body
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
def delete_alias(self, index, name, params=None, headers=None):
"""
Deletes an alias.
:arg index: A comma-separated list of index names (supports
wildcards); use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified indices.
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit timestamp for the document
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"DELETE", _make_path(index, "_alias", name), params=params, headers=headers
)
@query_params("create", "master_timeout", "cluster_manager_timeout", "order")
def put_template(self, name, body, params=None, headers=None):
"""
Creates or updates an index template.
:arg name: The name of the template
:arg body: The template definition
:arg create: Whether the index template should only be added if
new or can also replace an existing one
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg order: The order for this template when merging multiple
matching ones (higher numbers are merged later, overriding the lower
numbers)
"""
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_template", name),
params=params,
headers=headers,
body=body,
)
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
def exists_template(self, name, params=None, headers=None):
"""
Returns information about whether a particular index template exists.
:arg name: The comma separated names of the index templates
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
to master node
:arg cluster_manager_timeout: Explicit operation timeout for connection
to cluster_manager node
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"HEAD", _make_path("_template", name), params=params, headers=headers
)
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
def get_template(self, name=None, params=None, headers=None):
"""
Returns an index template.
:arg name: The comma separated names of the index templates
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
to master node
:arg cluster_manager_timeout: Explicit operation timeout for connection
to cluster_manager node
"""
return self.transport.perform_request(
"GET", _make_path("_template", name), params=params, headers=headers
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
def delete_template(self, name, params=None, headers=None):
"""
Deletes an index template.
:arg name: The name of the template
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"DELETE", _make_path("_template", name), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"include_defaults",
"local",
"master_timeout",
"cluster_manager_timeout",
)
def get_settings(self, index=None, name=None, params=None, headers=None):
"""
Returns settings for one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg name: The name of the settings that should be included
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: all
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg include_defaults: Whether to return all default setting for
each of the indices.
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
return self.transport.perform_request(
"GET", _make_path(index, "_settings", name), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flat_settings",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"preserve_existing",
"timeout",
)
def put_settings(self, body, index=None, params=None, headers=None):
"""
Updates the index settings.
:arg body: The index settings to be updated
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg flat_settings: Return settings in flat format (default:
false)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg preserve_existing: Whether to update existing settings. If
set to `true` existing settings on an index remain unchanged, the
default is `false`
:arg timeout: Explicit operation timeout
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"PUT",
_make_path(index, "_settings"),
params=params,
headers=headers,
body=body,
)
@query_params(
"completion_fields",
"expand_wildcards",
"fielddata_fields",
"fields",
"forbid_closed_indices",
"groups",
"include_segment_file_sizes",
"include_unloaded_segments",
"level",
"types",
)
def stats(self, index=None, metric=None, params=None, headers=None):
"""
Provides statistics on operations happening in an index.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg metric: Limit the information returned the specific
metrics. Valid choices: _all, completion, docs, fielddata, query_cache,
flush, get, indexing, merge, request_cache, refresh, search, segments,
store, warmer, suggest
:arg completion_fields: A comma-separated list of fields for
`fielddata` and `suggest` index metric (supports wildcards)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg fielddata_fields: A comma-separated list of fields for
`fielddata` index metric (supports wildcards)
:arg fields: A comma-separated list of fields for `fielddata`
and `completion` index metric (supports wildcards)
:arg forbid_closed_indices: If set to false stats will also
collected from closed indices if explicitly specified or if
expand_wildcards expands to closed indices Default: True
:arg groups: A comma-separated list of search groups for
`search` index metric
:arg include_segment_file_sizes: Whether to report the
aggregated disk usage of each one of the Lucene index files (only
applies if segment stats are requested)
:arg include_unloaded_segments: If set to true segment stats
will include stats for segments that are not currently loaded into
memory
:arg level: Return stats aggregated at cluster, index or shard
level Valid choices: cluster, indices, shards Default: indices
:arg types: A comma-separated list of document types for the
`indexing` index metric
"""
return self.transport.perform_request(
"GET", _make_path(index, "_stats", metric), params=params, headers=headers
)
@query_params(
"allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose"
)
def segments(self, index=None, params=None, headers=None):
"""
Provides low-level information about segments in a Lucene index.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg verbose: Includes detailed memory usage by Lucene.
"""
return self.transport.perform_request(
"GET", _make_path(index, "_segments"), params=params, headers=headers
)
@query_params(
"all_shards",
"allow_no_indices",
"analyze_wildcard",
"analyzer",
"default_operator",
"df",
"expand_wildcards",
"explain",
"ignore_unavailable",
"lenient",
"q",
"rewrite",
)
def validate_query(self, body=None, index=None, params=None, headers=None):
"""
Allows a user to validate a potentially expensive query without executing it.
:arg body: The query definition specified with the Query DSL
:arg index: A comma-separated list of index names to restrict
the operation; use `_all` or empty string to perform the operation on
all indices
restrict the operation; leave empty to perform the operation on all
types
:arg all_shards: Execute validation on all shards instead of one
random shard per index
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix
queries should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string
query (AND or OR) Valid choices: AND, OR Default: OR
:arg df: The field to use as default where no field prefix is
given in the query string
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg explain: Return detailed information about the error
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such
as providing text to a numeric field) should be ignored
:arg q: Query in the Lucene query string syntax
:arg rewrite: Provide a more detailed explanation showing the
actual Lucene query that will be executed.
"""
return self.transport.perform_request(
"POST",
_make_path(index, "_validate", "query"),
params=params,
headers=headers,
body=body,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"fielddata",
"fields",
"ignore_unavailable",
"query",
"request",
)
def clear_cache(self, index=None, params=None, headers=None):
"""
Clears all or specific caches for one or more indices.
:arg index: A comma-separated list of index name to limit the
operation
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg fielddata: Clear field data
:arg fields: A comma-separated list of fields to clear when
using the `fielddata` parameter (default: all)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg query: Clear query caches
:arg request: Clear request cache
"""
return self.transport.perform_request(
"POST", _make_path(index, "_cache", "clear"), params=params, headers=headers
)
@query_params("active_only", "detailed")
def recovery(self, index=None, params=None, headers=None):
"""
Returns information about ongoing index shard recoveries.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg active_only: Display only those recoveries that are
currently on-going
:arg detailed: Whether to display detailed information about
shard recovery
"""
return self.transport.perform_request(
"GET", _make_path(index, "_recovery"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"only_ancient_segments",
"wait_for_completion",
)
def upgrade(self, index=None, params=None, headers=None):
"""
DEPRECATED Upgrades to the current version of Lucene.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg only_ancient_segments: If true, only ancient (an older
Lucene major release) segments will be upgraded
:arg wait_for_completion: Specify whether the request should
block until the all segments are upgraded (default: false)
"""
return self.transport.perform_request(
"POST", _make_path(index, "_upgrade"), params=params, headers=headers
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
def get_upgrade(self, index=None, params=None, headers=None):
"""
DEPRECATED Returns a progress status of current upgrade.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
"""
return self.transport.perform_request(
"GET", _make_path(index, "_upgrade"), params=params, headers=headers
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
def flush_synced(self, index=None, params=None, headers=None):
"""
Performs a synced flush operation on one or more indices. Synced flush is
deprecated. Use flush instead
:arg index: A comma-separated list of index names; use `_all` or
empty string for all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
"""
return self.transport.perform_request(
"POST",
_make_path(index, "_flush", "synced"),
params=params,
headers=headers,
)
@query_params(
"allow_no_indices", "expand_wildcards", "ignore_unavailable", "status"
)
def shard_stores(self, index=None, params=None, headers=None):
"""
Provides store information for shard copies of indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg status: A comma-separated list of statuses used to filter
on shards to get store information for Valid choices: green, yellow,
red, all
"""
return self.transport.perform_request(
"GET", _make_path(index, "_shard_stores"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flush",
"ignore_unavailable",
"max_num_segments",
"only_expunge_deletes",
)
def forcemerge(self, index=None, params=None, headers=None):
"""
Performs the force merge operation on one or more indices.
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg flush: Specify whether the index should be flushed after
performing the operation (default: true)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg max_num_segments: The number of segments the index should
be merged into (default: dynamic)
:arg only_expunge_deletes: Specify whether the operation should
only expunge deleted documents
"""
return self.transport.perform_request(
"POST", _make_path(index, "_forcemerge"), params=params, headers=headers
)
@query_params(
"copy_settings",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def shrink(self, index, target, body=None, params=None, headers=None):
"""
Allow to shrink an existing index into a new index with fewer primary shards.
:arg index: The name of the source index to shrink
:arg target: The name of the target index to shrink into
:arg body: The configuration for the target index (`settings`
and `aliases`)
:arg copy_settings: whether or not to copy settings from the
source index (defaults to false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for on the shrunken index before the operation returns.
"""
for param in (index, target):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path(index, "_shrink", target),
params=params,
headers=headers,
body=body,
)
@query_params(
"copy_settings",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def split(self, index, target, body=None, params=None, headers=None):
"""
Allows you to split an existing index into a new index with more primary
shards.
:arg index: The name of the source index to split
:arg target: The name of the target index to split into
:arg body: The configuration for the target index (`settings`
and `aliases`)
:arg copy_settings: whether or not to copy settings from the
source index (defaults to false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for on the shrunken index before the operation returns.
"""
for param in (index, target):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path(index, "_split", target),
params=params,
headers=headers,
body=body,
)
@query_params(
"dry_run",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def rollover(self, alias, body=None, new_index=None, params=None, headers=None):
"""
Updates an alias to point to a new index when the existing index is considered
to be too large or too old.
:arg alias: The name of the alias to rollover
:arg body: The conditions that needs to be met for executing
rollover
:arg new_index: The name of the rollover index
:arg dry_run: If set to true the rollover action will only be
validated but not actually performed even if a condition matches. The
default is false
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to
wait for on the newly created rollover index before the operation
returns.
"""
if alias in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'alias'.")
return self.transport.perform_request(
"POST",
_make_path(alias, "_rollover", new_index),
params=params,
headers=headers,
body=body,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def freeze(self, index, params=None, headers=None):
"""
Freezes an index. A frozen index has almost no overhead on the cluster (except
for maintaining its metadata in memory) and is read-only.
:arg index: The name of the index to freeze
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: closed
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of active shards to
wait for before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST", _make_path(index, "_freeze"), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
"wait_for_active_shards",
)
def unfreeze(self, index, params=None, headers=None):
"""
Unfreezes an index. When a frozen index is unfrozen, the index goes through the
normal recovery process and becomes writeable again.
:arg index: The name of the index to unfreeze
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: closed
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of active shards to
wait for before the operation returns.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST", _make_path(index, "_unfreeze"), params=params, headers=headers
)
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
def reload_search_analyzers(self, index, params=None, headers=None):
"""
Reloads an index's search analyzers and their resources.
:arg index: A comma-separated list of index names to reload
analyzers for
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"GET",
_make_path(index, "_reload_search_analyzers"),
params=params,
headers=headers,
)
@query_params()
def create_data_stream(self, name, params=None, headers=None):
"""
Creates a data stream
:arg name: The name of the data stream
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"PUT", _make_path("_data_stream", name), params=params, headers=headers
)
@query_params("expand_wildcards")
def delete_data_stream(self, name, params=None, headers=None):
"""
Deletes a data stream.
:arg name: A comma-separated list of data streams to delete; use
`*` to delete all data streams
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"DELETE", _make_path("_data_stream", name), params=params, headers=headers
)
@query_params("master_timeout", "cluster_manager_timeout", "timeout")
def delete_index_template(self, name, params=None, headers=None):
"""
Deletes an index template.
:arg name: The name of the template
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"DELETE",
_make_path("_index_template", name),
params=params,
headers=headers,
)
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
def exists_index_template(self, name, params=None, headers=None):
"""
Returns information about whether a particular index template exists.
:arg name: The name of the template
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
to master node
:arg cluster_manager_timeout: Explicit operation timeout for connection
to cluster_manager node
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"HEAD", _make_path("_index_template", name), params=params, headers=headers
)
@query_params("flat_settings", "local", "master_timeout", "cluster_manager_timeout")
def get_index_template(self, name=None, params=None, headers=None):
"""
Returns an index template.
:arg name: The comma separated names of the index templates
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from cluster_manager node (default: false)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Explicit operation timeout for connection
to master node
:arg cluster_manager_timeout: Explicit operation timeout for connection
to cluster_manager node
"""
return self.transport.perform_request(
"GET", _make_path("_index_template", name), params=params, headers=headers
)
@query_params("cause", "create", "master_timeout", "cluster_manager_timeout")
def put_index_template(self, name, body, params=None, headers=None):
"""
Creates or updates an index template.
:arg name: The name of the template
:arg body: The template definition
:arg cause: User defined reason for creating/updating the index
template
:arg create: Whether the index template should only be added if
new or can also replace an existing one
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_index_template", name),
params=params,
headers=headers,
body=body,
)
@query_params("cause", "create", "master_timeout", "cluster_manager_timeout")
def simulate_index_template(self, name, body=None, params=None, headers=None):
"""
Simulate matching the given index name against the index templates in the
system
:arg name: The name of the index (it must be a concrete index
name)
:arg body: New index template definition, which will be included
in the simulation, as if it already exists in the system
:arg cause: User defined reason for dry-run creating the new
template for simulation purposes
:arg create: Whether the index template we optionally defined in
the body should only be dry-run added if new or can also replace an
existing one
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"POST",
_make_path("_index_template", "_simulate_index", name),
params=params,
headers=headers,
body=body,
)
@query_params("expand_wildcards")
def get_data_stream(self, name=None, params=None, headers=None):
"""
Returns data streams.
:arg name: A comma-separated list of data streams to get; use
`*` to get all data streams
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
"""
return self.transport.perform_request(
"GET", _make_path("_data_stream", name), params=params, headers=headers
)
@query_params("cause", "create", "master_timeout", "cluster_manager_timeout")
def simulate_template(self, body=None, name=None, params=None, headers=None):
"""
Simulate resolving the given template name or body
:arg body: New index template definition to be simulated, if no
index template name is specified
:arg name: The name of the index template
:arg cause: User defined reason for dry-run creating the new
template for simulation purposes
:arg create: Whether the index template we optionally defined in
the body should only be dry-run added if new or can also replace an
existing one
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
"""
return self.transport.perform_request(
"POST",
_make_path("_index_template", "_simulate", name),
params=params,
headers=headers,
body=body,
)
@query_params("expand_wildcards")
def resolve_index(self, name, params=None, headers=None):
"""
Returns information about any matching indices, aliases, and data streams
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg name: A comma-separated list of names or wildcard
expressions
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"GET", _make_path("_resolve", "index", name), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"cluster_manager_timeout",
"timeout",
)
def add_block(self, index, block, params=None, headers=None):
"""
Adds a block to an index.
:arg index: A comma separated list of indices to add a block to
:arg block: The block to add (one of read, write, read_only or
metadata)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout (Deprecated: use cluster_manager_timeout): Specify timeout for connection to master
:arg cluster_manager_timeout: Specify timeout for connection to cluster_manager
:arg timeout: Explicit operation timeout
"""
for param in (index, block):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT", _make_path(index, "_block", block), params=params, headers=headers
)
@query_params()
def data_streams_stats(self, name=None, params=None, headers=None):
"""
Provides statistics on operations happening in a data stream.
:arg name: A comma-separated list of data stream names; use
`_all` or empty string to perform the operation on all data streams
"""
return self.transport.perform_request(
"GET",
_make_path("_data_stream", name, "_stats"),
params=params,
headers=headers,
)
@query_params()
def promote_data_stream(self, name, params=None, headers=None):
"""
Promotes a data stream from a replicated data stream managed by CCR to a
regular data stream
:arg name: The name of the data stream
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"POST",
_make_path("_data_stream", "_promote", name),
params=params,
headers=headers,
)
@query_params()
def migrate_to_data_stream(self, name, params=None, headers=None):
"""
Migrates an alias to a data stream
:arg name: The name of the alias to migrate
"""
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return self.transport.perform_request(
"POST",
_make_path("_data_stream", "_migrate", name),
params=params,
headers=headers,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flush",
"ignore_unavailable",
"run_expensive_tasks",
)
def disk_usage(self, index, params=None, headers=None):
"""
Analyzes the disk usage of each field of an index or data stream
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg index: Comma-separated list of indices or data streams to
analyze the disk usage
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg flush: Whether flush or not before analyzing the index disk
usage. Defaults to true
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg run_expensive_tasks: Must be set to [true] in order for the
task to be performed. Defaults to false.
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"POST", _make_path(index, "_disk_usage"), params=params, headers=headers
)
@query_params(
"allow_no_indices", "expand_wildcards", "fields", "ignore_unavailable"
)
def field_usage_stats(self, index, params=None, headers=None):
"""
Returns the field usage stats for each field of an index
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg index: A comma-separated list of index names; use `_all` or
empty string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg fields: A comma-separated list of fields to include in the
stats if only a subset of fields should be returned (supports wildcards)
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return self.transport.perform_request(
"GET",
_make_path(index, "_field_usage_stats"),
params=params,
headers=headers,
) | /retakesearch-py-2.2.5.tar.gz/retakesearch-py-2.2.5/opensearchpy/client/indices.py | 0.689619 | 0.344581 | indices.py | pypi |
import warnings
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class TasksClient(NamespacedClient):
    """Client for the task management (`/_tasks`) API group."""

    @query_params(
        "actions",
        "detailed",
        "group_by",
        "nodes",
        "parent_task_id",
        "timeout",
        "wait_for_completion",
    )
    def list(self, params=None, headers=None):
        """
        Return a list of tasks.

        .. warning::

            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg actions: Comma-separated actions to return; leave empty to
            return all.
        :arg detailed: Return detailed task information (default: false).
        :arg group_by: Group tasks by nodes or parent/child relationships.
            Valid choices: nodes, parents, none Default: nodes
        :arg nodes: Comma-separated node IDs or names to limit the returned
            information; `_local` targets the connected node, empty targets
            all nodes.
        :arg parent_task_id: Return tasks with the given parent task id
            (node_id:task_number); -1 returns all.
        :arg timeout: Explicit operation timeout.
        :arg wait_for_completion: Wait for the matching tasks to complete
            (default: false).
        """
        return self.transport.perform_request(
            "GET", "/_tasks", params=params, headers=headers
        )

    @query_params("actions", "nodes", "parent_task_id", "wait_for_completion")
    def cancel(self, task_id=None, params=None, headers=None):
        """
        Cancel a task, if it can be cancelled through an API.

        .. warning::

            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg task_id: Cancel the task with the given task id
            (node_id:task_number).
        :arg actions: Comma-separated actions to cancel; leave empty to
            cancel all.
        :arg nodes: Comma-separated node IDs or names to limit the returned
            information; `_local` targets the connected node, empty targets
            all nodes.
        :arg parent_task_id: Cancel tasks with the given parent task id
            (node_id:task_number); -1 cancels all.
        :arg wait_for_completion: Block until the cancellation of the task
            and its descendant tasks completes. Defaults to false.
        """
        path = _make_path("_tasks", task_id, "_cancel")
        return self.transport.perform_request(
            "POST", path, params=params, headers=headers
        )

    @query_params("timeout", "wait_for_completion")
    def get(self, task_id=None, params=None, headers=None):
        """
        Return information about a task.

        .. warning::

            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg task_id: Return the task with the given id
            (node_id:task_number).
        :arg timeout: Explicit operation timeout.
        :arg wait_for_completion: Wait for the matching tasks to complete
            (default: false).
        """
        # Calling without a task_id still works but is deprecated upstream;
        # warn instead of failing so existing callers keep functioning.
        if task_id in SKIP_IN_PATH:
            warnings.warn(
                "Calling client.tasks.get() without a task_id is deprecated "
                "and will be removed in v8.0. Use client.tasks.list() instead.",
                category=DeprecationWarning,
                stacklevel=3,
            )
        path = _make_path("_tasks", task_id)
        return self.transport.perform_request(
            "GET", path, params=params, headers=headers
        )
from typing import List, Union
import logging
from retarus.commons.config import Configuration
from .model import Client
from retarus.commons.region import RegionUri
from retarus.commons.exceptions import RetarusSDKError
from retarus.commons.transport import Transporter
class RetarusRessourceNotFound(RetarusSDKError):
    """Raised when a requested resource (e.g. a fax document) is missing."""
    pass
class AsyncClient(Client):
    """Asynchronous client for polling, downloading and acknowledging
    inbound faxes via the Retarus fax-in-poll API.
    """

    def __init__(self, out_path: str, page_size: int, timeout: int, url: List[RegionUri]):
        """
        :param out_path: directory where downloaded faxes are stored; a
            trailing slash is appended when missing so a file name can be
            concatenated directly.
        :param page_size: maximum number of fax records fetched per request.
        :param timeout: server-side poll timeout forwarded as a query param.
        :param url: region-specific API endpoints used by the transporter.
        """
        # Normalize so f"{self.out_path}{filename}" forms a valid path.
        if out_path and not out_path.endswith("/"):
            out_path += "/"
        self.out_path = out_path
        self.page_size = page_size
        self.timeout = timeout
        self.transporter = Transporter(url)

    async def fetch_fax_list(self, topic: str, ids: List[str] = None):
        """Fetch the list of available faxes for *topic*.

        :param topic: topic to poll.
        :param ids: optional fax ids to restrict the query to.
        :returns: the ``results`` list from the response, or ``[]``.
        """
        path = f"/topics/{topic}"
        if ids is not None:
            # NOTE(review): "%" as the join separator is unusual —
            # acknowledge_fax uses ","; confirm against the Retarus API.
            ids = "%".join(ids)
        query_params = {"fetch": self.page_size, "timeout": self.timeout, "ids": ids}
        res = await self.transporter.post(path, {}, remove_none(query_params))
        return res.get("results", [])

    async def download_fax(self, doc_url: str):
        """Download one fax document and write it into ``out_path``.

        :param doc_url: document URL whose last path segment is the file name.
        :raises RetarusRessourceNotFound: when the server returns no content.
        """
        filename = doc_url.split("/")[-1]
        # Fix: the path templates previously contained literal "(unknown)"
        # placeholders and never interpolated the computed `filename`.
        res = await self.transporter.get(f"files/{filename}")
        # Fix: fail *before* writing. The old code compared the file object
        # to False after the `with` block, which could never be true.
        if not res:
            raise RetarusRessourceNotFound()
        with open(f"{self.out_path}{filename}", "wb") as file:
            file.write(res)

    async def acknowledge_fax(self, topic: str, ids: List[str]) -> Union[list, bool]:
        """Acknowledge the given faxes on the server.

        :returns: ``[]`` on success, otherwise the raw server response.
        """
        path = f"/topics/{topic}"
        if ids is not None:
            ids = ",".join(ids)
            # (removed a no-op `ids.encode("utf8")` whose result was discarded)
        query_params = {"fetch": 0, "timeout": self.timeout, "ids": ids, "toString": ""}
        res = await self.transporter.post(path, {}, remove_none(query_params))
        if "results" in res:
            return []
        return res
def remove_none(data: dict) -> dict:
    """Return a copy of *data* without the keys whose value is ``None``.

    Used to strip unset query parameters before sending a request.
    """
    # `is not None` (not `== None`) so falsy values like 0 and "" survive.
    return {key: value for key, value in data.items() if value is not None}
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, validator
from retarus.commons.utils import to_camel_case
class Options(BaseModel):
    """Job-level options controlling how an SMS job is processed.

    All fields are optional; field names are converted to camelCase keys in
    the serialized payload via ``alias_generator`` (see ``Config`` below).
    """

    src: Optional[str]  # presumably the sender id — confirm against the SMS API
    encoding: Optional[str]
    billcode: Optional[str]
    status_requested: Optional[bool]
    flash: Optional[bool]
    customer_ref: Optional[str]
    validity_min: Optional[int]
    max_parts: Optional[int]
    invalid_characters: Optional[str]
    qos: Optional[str]
    job_period: Optional[str]
    duplicate_detection: Optional[bool]
    blackout_periods: Optional[List[str]]

    class Config:
        # Use camelCase aliases (e.g. status_requested -> statusRequested).
        alias_generator = to_camel_case
class Recipient(BaseModel):
    """A single SMS recipient plus optional per-recipient overrides."""

    dst: str  # destination number (the only required field)
    customer_ref: Optional[str]
    blackout_periods: Optional[List[str]]

    class Config:
        # camelCase aliases in the serialized payload.
        alias_generator = to_camel_case
class Messages(BaseModel):
    """One message text together with the recipients it is sent to."""

    text: str
    recipients: List[Recipient]
class SmsJob(BaseModel):
    """
    Create an instance of a SmsJob, set all your needed properties and
    dispatch it to the Retarus server to send it.

    options: optional settings controlling how the SMS should be processed.
    messages (required): the message(s) that you want to send.
    """

    options: Optional[Options]
    messages: List[Messages]

    class Config:
        # camelCase aliases in the serialized payload.
        alias_generator = to_camel_case
def exclude_optional_dict(model: BaseModel):
    """Serialize *model*, merging the explicitly-set fields with the
    non-None fields (the latter take precedence on shared keys)."""
    without_unset = model.dict(exclude_unset=True)
    without_none = model.dict(exclude_none=True)
    return {**without_unset, **without_none}
def minimal(number: str, message: str) -> SmsJob:
    """Build the smallest valid SmsJob: one message for one recipient."""
    recipient = Recipient(dst=number)
    sms = Messages(text=message, recipients=[recipient])
    return SmsJob(messages=[sms])
class JobReport(BaseModel):
    """Status report for a submitted SMS job as returned by the service.

    NOTE(review): every field is declared required here — confirm the API
    always returns all of them.
    """

    job_id: str
    src: str
    encoding: str
    billcode: str
    status_requested: bool
    flash: bool
    validity_min: int
    customer_ref: str
    qos: str
    receipt_ts: str
    finished_ts: str
    recipient_ids: List[str]

    class Config:
        # camelCase aliases when parsing/serializing (e.g. jobId).
        alias_generator = to_camel_case
class Client(object):
    """Abstract SMS client interface; concrete clients override each hook.

    Every method is a no-op returning ``None`` in this base class.
    """

    def send_sms(self, sms: SmsJob):
        """Dispatch *sms* to the service."""

    def get_sms_job(self, job_id: str) -> dict:
        """Fetch the report for a previously submitted job."""

    def filter_sms_jobs(self, *args, **kwargs):
        """Query jobs matching the given filter criteria."""

    def server_version(self):
        """Report the version of the remote service."""
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_ec2
import aws_cdk.aws_iam
import aws_cdk.core
import constructs
class ECRDeployment(
    aws_cdk.core.Construct,
    metaclass=jsii.JSIIMeta,
    jsii_type="retbrown-cdk-ecr-deployment.ECRDeployment",
):
    """CDK construct that copies a Docker image from ``src`` to ``dest``.

    This is a jsii binding: construction is delegated to the underlying
    implementation through ``jsii.create``.
    """

    def __init__(
        self,
        scope: constructs.Construct,
        id: builtins.str,
        *,
        dest: "IImageName",
        src: "IImageName",
        environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        memory_limit: typing.Optional[jsii.Number] = None,
        role: typing.Optional[aws_cdk.aws_iam.IRole] = None,
        vpc: typing.Optional[aws_cdk.aws_ec2.IVpc] = None,
        vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection] = None,
    ) -> None:
        '''
        :param scope: -
        :param id: -
        :param dest: The destination of the docker image.
        :param src: The source of the docker image.
        :param environment: The environment variable to set.
        :param memory_limit: The amount of memory (in MiB) to allocate to the AWS Lambda function which replicates the files from the CDK bucket to the destination bucket. If you are deploying large files, you will need to increase this number accordingly. Default: 512
        :param role: Execution role associated with this function. Default: - A role is automatically created
        :param vpc: The VPC network to place the deployment lambda handler in. Default: None
        :param vpc_subnets: Where in the VPC to place the deployment lambda handler. Only used if 'vpc' is supplied. Default: - the Vpc default strategy if not specified
        '''
        # Bundle the keyword arguments into the jsii struct expected by the
        # underlying implementation.
        props = ECRDeploymentProps(
            dest=dest,
            src=src,
            environment=environment,
            memory_limit=memory_limit,
            role=role,
            vpc=vpc,
            vpc_subnets=vpc_subnets,
        )

        jsii.create(self.__class__, self, [scope, id, props])
@jsii.data_type(
    jsii_type="retbrown-cdk-ecr-deployment.ECRDeploymentProps",
    jsii_struct_bases=[],
    name_mapping={
        "dest": "dest",
        "src": "src",
        "environment": "environment",
        "memory_limit": "memoryLimit",
        "role": "role",
        "vpc": "vpc",
        "vpc_subnets": "vpcSubnets",
    },
)
class ECRDeploymentProps:
    """Construction properties for :class:`ECRDeployment` (jsii data struct).

    Values are held in the private ``_values`` mapping and exposed through
    read-only properties; optional entries are stored only when provided.
    """

    def __init__(
        self,
        *,
        dest: "IImageName",
        src: "IImageName",
        environment: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        memory_limit: typing.Optional[jsii.Number] = None,
        role: typing.Optional[aws_cdk.aws_iam.IRole] = None,
        vpc: typing.Optional[aws_cdk.aws_ec2.IVpc] = None,
        vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection] = None,
    ) -> None:
        '''
        :param dest: The destination of the docker image.
        :param src: The source of the docker image.
        :param environment: The environment variable to set.
        :param memory_limit: The amount of memory (in MiB) to allocate to the AWS Lambda function which replicates the files from the CDK bucket to the destination bucket. If you are deploying large files, you will need to increase this number accordingly. Default: 512
        :param role: Execution role associated with this function. Default: - A role is automatically created
        :param vpc: The VPC network to place the deployment lambda handler in. Default: None
        :param vpc_subnets: Where in the VPC to place the deployment lambda handler. Only used if 'vpc' is supplied. Default: - the Vpc default strategy if not specified
        '''
        # Accept a plain dict for vpc_subnets and coerce it into the struct.
        if isinstance(vpc_subnets, dict):
            vpc_subnets = aws_cdk.aws_ec2.SubnetSelection(**vpc_subnets)
        self._values: typing.Dict[str, typing.Any] = {
            "dest": dest,
            "src": src,
        }
        # Optional entries are only stored when explicitly provided.
        if environment is not None:
            self._values["environment"] = environment
        if memory_limit is not None:
            self._values["memory_limit"] = memory_limit
        if role is not None:
            self._values["role"] = role
        if vpc is not None:
            self._values["vpc"] = vpc
        if vpc_subnets is not None:
            self._values["vpc_subnets"] = vpc_subnets

    @builtins.property
    def dest(self) -> "IImageName":
        '''The destination of the docker image.'''
        result = self._values.get("dest")
        assert result is not None, "Required property 'dest' is missing"
        return typing.cast("IImageName", result)

    @builtins.property
    def src(self) -> "IImageName":
        '''The source of the docker image.'''
        result = self._values.get("src")
        assert result is not None, "Required property 'src' is missing"
        return typing.cast("IImageName", result)

    @builtins.property
    def environment(
        self,
    ) -> typing.Optional[typing.Mapping[builtins.str, builtins.str]]:
        '''The environment variable to set.'''
        result = self._values.get("environment")
        return typing.cast(typing.Optional[typing.Mapping[builtins.str, builtins.str]], result)

    @builtins.property
    def memory_limit(self) -> typing.Optional[jsii.Number]:
        '''The amount of memory (in MiB) to allocate to the AWS Lambda function which replicates the files from the CDK bucket to the destination bucket.

        If you are deploying large files, you will need to increase this number
        accordingly.

        :default: 512
        '''
        result = self._values.get("memory_limit")
        return typing.cast(typing.Optional[jsii.Number], result)

    @builtins.property
    def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
        '''Execution role associated with this function.

        :default: - A role is automatically created
        '''
        result = self._values.get("role")
        return typing.cast(typing.Optional[aws_cdk.aws_iam.IRole], result)

    @builtins.property
    def vpc(self) -> typing.Optional[aws_cdk.aws_ec2.IVpc]:
        '''The VPC network to place the deployment lambda handler in.

        :default: None
        '''
        result = self._values.get("vpc")
        return typing.cast(typing.Optional[aws_cdk.aws_ec2.IVpc], result)

    @builtins.property
    def vpc_subnets(self) -> typing.Optional[aws_cdk.aws_ec2.SubnetSelection]:
        '''Where in the VPC to place the deployment lambda handler.

        Only used if 'vpc' is supplied.

        :default: - the Vpc default strategy if not specified
        '''
        result = self._values.get("vpc_subnets")
        return typing.cast(typing.Optional[aws_cdk.aws_ec2.SubnetSelection], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Structural equality: same class and same stored values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "ECRDeploymentProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
@jsii.interface(jsii_type="retbrown-cdk-ecr-deployment.IImageName")
class IImageName(typing_extensions.Protocol):
    """jsii interface for a named Docker image: a uri plus optional creds."""

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="uri")
    def uri(self) -> builtins.str:
        '''The uri of the docker image.

        The uri spec follows https://github.com/containers/skopeo
        '''
        ...

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="creds")
    def creds(self) -> typing.Optional[builtins.str]:
        '''The credentials of the docker image.

        Format ``user:[password]``
        '''
        ...

    @creds.setter
    def creds(self, value: typing.Optional[builtins.str]) -> None:
        ...
class _IImageNameProxy:
    """Concrete proxy the jsii runtime uses for IImageName implementors."""

    __jsii_type__: typing.ClassVar[str] = "retbrown-cdk-ecr-deployment.IImageName"

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="uri")
    def uri(self) -> builtins.str:
        '''The uri of the docker image.

        The uri spec follows https://github.com/containers/skopeo
        '''
        return typing.cast(builtins.str, jsii.get(self, "uri"))

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="creds")
    def creds(self) -> typing.Optional[builtins.str]:
        '''The credentials of the docker image.

        Format ``user:[password]``
        '''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "creds"))

    @creds.setter
    def creds(self, value: typing.Optional[builtins.str]) -> None:
        jsii.set(self, "creds", value)
# Adding a "__jsii_proxy_class__(): typing.Type" function to the interface
typing.cast(typing.Any, IImageName).__jsii_proxy_class__ = lambda : _IImageNameProxy
@jsii.implements(IImageName)
class S3ArchiveName(
    metaclass=jsii.JSIIMeta,
    jsii_type="retbrown-cdk-ecr-deployment.S3ArchiveName",
):
    """IImageName backed by an archive stored in S3 (jsii binding)."""

    def __init__(
        self,
        p: builtins.str,
        ref: typing.Optional[builtins.str] = None,
        creds: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param p: -
        :param ref: -
        :param creds: -
        '''
        jsii.create(self.__class__, self, [p, ref, creds])

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="uri")
    def uri(self) -> builtins.str:
        '''The uri of the docker image.

        The uri spec follows https://github.com/containers/skopeo
        '''
        return typing.cast(builtins.str, jsii.get(self, "uri"))

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="creds")
    def creds(self) -> typing.Optional[builtins.str]:
        # Credentials in ``user:[password]`` format, if any.
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "creds"))

    @creds.setter
    def creds(self, value: typing.Optional[builtins.str]) -> None:
        jsii.set(self, "creds", value)
@jsii.implements(IImageName)
class DockerImageName(
    metaclass=jsii.JSIIMeta,
    jsii_type="retbrown-cdk-ecr-deployment.DockerImageName",
):
    """IImageName referring to an image in a Docker registry (jsii binding)."""

    def __init__(
        self,
        name: builtins.str,
        creds: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param name: -
        :param creds: -
        '''
        jsii.create(self.__class__, self, [name, creds])

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="uri")
    def uri(self) -> builtins.str:
        '''The uri of the docker image.

        The uri spec follows https://github.com/containers/skopeo
        '''
        return typing.cast(builtins.str, jsii.get(self, "uri"))

    @builtins.property  # type: ignore[misc]
    @jsii.member(jsii_name="creds")
    def creds(self) -> typing.Optional[builtins.str]:
        # Credentials in ``user:[password]`` format, if any.
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "creds"))

    @creds.setter
    def creds(self, value: typing.Optional[builtins.str]) -> None:
        jsii.set(self, "creds", value)
__all__ = [
"DockerImageName",
"ECRDeployment",
"ECRDeploymentProps",
"IImageName",
"S3ArchiveName",
]
publication.publish() | /retbrown-cdk-ecr-deployment-1.0.3.tar.gz/retbrown-cdk-ecr-deployment-1.0.3/src/retbrown_cdk_ecr_deployment/__init__.py | 0.685002 | 0.242531 | __init__.py | pypi |
import contextlib
import datetime
import os
import shutil
import time
class Resource:
    """Base class of all resources.

    :param str id: Unique identifier of the resource.
    :param retdec.conn.APIConnection conn: Connection to the API to be used for
        sending API requests.
    """

    #: Time interval after which we can update resource's state.
    _STATE_UPDATE_INTERVAL = datetime.timedelta(seconds=0.5)

    def __init__(self, id, conn):
        self._id = id
        self._conn = conn

        # To prevent abuse of the API, we update the state of the resource
        # only once in a while. To keep track whether we should perform an
        # update, we store the date and time of the last update. By
        # initializing it to the minimal representable date, we ensure that
        # the resource gets updated upon the first call of a state-checking
        # method, like has_finished().
        self._last_updated = datetime.datetime.min

    @property
    def id(self):
        """Unique identifier of the resource."""
        return self._id

    def is_pending(self):
        """Is the resource in a pending state?

        A resource is *pending* if it is scheduled to run but has not started
        yet.
        """
        self._update_state_if_needed()
        return self._pending

    def is_running(self):
        """Is the resource currently running?"""
        self._update_state_if_needed()
        return self._running

    def has_finished(self):
        """Has the resource finished?"""
        self._update_state_if_needed()
        return self._finished

    def has_succeeded(self):
        """Has the resource succeeded?"""
        self._update_state_if_needed()
        # Bug fix: this used to return self._finished, so every finished
        # resource -- including failed ones -- was reported as succeeded.
        return self._succeeded

    def has_failed(self):
        """Has the resource failed?

        For finished resources, this is always the negation of
        :func:`has_succeeded()`.
        """
        self._update_state_if_needed()
        return self._failed

    def get_error(self):
        """Returns the reason why the resource failed.

        If the resource has not failed, it returns ``None``.
        """
        self._update_state_if_needed()
        return self._error

    def _update_state_if_needed(self):
        """Updates the state of the resource (if needed)."""
        if self._state_should_be_updated():
            self._update_state()

    def _state_should_be_updated(self):
        """Should the state of the resource be updated?"""
        # To prevent abuse of the API, update the status only once in a while.
        now = datetime.datetime.now()
        return (now - self._last_updated) > self._STATE_UPDATE_INTERVAL

    def _wait_until_state_can_be_updated(self):
        """Waits until the state can be updated."""
        time.sleep(self._STATE_UPDATE_INTERVAL.total_seconds())

    def _update_state(self):
        """Updates the cached state of the resource from the API status."""
        status = self._get_status()
        self._pending = status['pending']
        self._running = status['running']
        self._finished = status['finished']
        self._succeeded = status['succeeded']
        self._failed = status['failed']
        self._error = status['error']
        self._last_updated = datetime.datetime.now()
        return status

    def _get_status(self):
        """Obtains and returns the current status of the resource."""
        return self._conn.send_get_request('/{}/status'.format(self.id))

    def _handle_failure(self, on_failure, *args):
        """Handles the situation where a resource failed to succeed.

        :param callable on_failure: What should be done when the resource
            failed?

        If `on_failure` is ``None``, nothing is done when the resource failed.
        Otherwise, it is called with `*args`. If the returned value is an
        exception, it is raised.
        """
        if on_failure is not None:
            obj = on_failure(*args)
            if isinstance(obj, Exception):
                raise obj

    def _get_file_contents(self, file_path, is_text_file):
        """Obtains the contents of a file from the given path.

        :param str file_path: Path to the file to be downloaded.
        :param bool is_text_file: Is it a text file or a binary file?
        """
        with contextlib.closing(self._conn.get_file(file_path)) as file:
            contents = file.read()
            if is_text_file:
                contents = contents.decode()
            return contents

    def _get_file_and_save_it(self, file_path, directory=None):
        """Obtains a file from `file_path` and saves it to `directory`.

        :param str file_path: Path to the file to be downloaded.
        :param str directory: Directory in which the file will be stored.

        :returns: Path to the saved file (`str`).

        If `directory` is ``None``, the current working directory is used.
        """
        directory = directory or os.getcwd()
        with contextlib.closing(self._conn.get_file(file_path)) as src:
            dst_path = os.path.join(directory, src.name)
            with open(dst_path, 'wb') as dst:
                shutil.copyfileobj(src, dst)
            return dst_path
from retdec.decompilation import Decompilation
from retdec.exceptions import MissingParameterError
from retdec.file import File
from retdec.service import Service
class Decompiler(Service):
    """Client-side access to the retdec.com decompilation service."""

    def start_decompilation(self, **kwargs):
        """Starts a decompilation with the given parameters.

        :param input_file: File to be decompiled (**required**).
        :type input_file: str or file-like object
        :param pdb_file: PDB file with debugging information for `input_file`.
        :type pdb_file: str or file-like object
        :param mode: Decompilation mode (``'c'``, ``'bin'`` or ``'raw'``).
            When not given, it is derived from the input file name: ``'c'``
            for files ending with ``.c``/``.C``, ``'bin'`` otherwise.
        :param target_language: Target high-level language.
        :param graph_format: Format of the generated call and control-flow
            graphs.
        :param decomp_var_names: Naming style for variables.
        :param decomp_optimizations: Level of decompiler optimizations.
        :param decomp_unreach_funcs: Decompile also functions unreachable
            from main?
        :param decomp_emit_addresses: Emit addresses in code comments?
        :param architecture: Architecture (precise meaning depends on `mode`).
        :param file_format: File format used when compiling C sources.
        :param comp_compiler: Compiler used for input C source files.
        :param comp_optimizations: Compiler optimizations for C sources.
        :param comp_debug: Compile C sources with debugging information?
        :param comp_strip: Strip the compiled C source file?
        :param sel_decomp_funcs: Decompile only the selected functions; an
            iterable of names or a comma-separated string.
        :param sel_decomp_ranges: Decompile only the selected address ranges;
            an iterable of ``(start, end)`` pairs or a string such as
            ``'0x100-0x200,0x400-0x500'``.
        :param sel_decomp_decoding: Which instructions should be decoded when
            a selective decompilation is requested.
        :param endian: Endianness of raw machine code (``'little'`` or
            ``'big'``); the pre-2.2 name ``raw_endian`` is also accepted.
        :param raw_entry_point: Entry point address for the ``raw`` mode.
        :param raw_section_vma: Section load address for the ``raw`` mode.
        :param ar_index: Index of the object file to decompile in an archive.
        :param ar_name: Name of the object file to decompile in an archive.
        :param generate_cg: Generate a call graph?
        :param generate_cfgs: Generate control-flow graphs?
        :param generate_archive: Generate an archive with all outputs?

        :returns: Started decompilation
            (:class:`~retdec.decompilation.Decompilation`).

        See the official documentation
        (https://retdec.com/api/docs/decompiler.html) for details.
        """
        conn = self._create_new_api_connection('/decompiler/decompilations')
        decompilation_id = self._start_decompilation(conn, kwargs)
        return Decompilation(decompilation_id, conn)

    def _start_decompilation(self, conn, kwargs):
        """Sends the start request and returns the new decompilation's id.

        :param retdec.conn.APIConnection conn: Connection to the API to be
            used for sending API requests.
        :param dict kwargs: Parameters for the decompilation.
        """
        files = {'input': self._get_input_file(kwargs)}
        self._add_pdb_file_when_given(files, kwargs)

        params = {'mode': self._get_mode_param(files['input'], kwargs)}
        # Parameters forwarded verbatim when present in kwargs.
        for name in (
            'target_language', 'graph_format', 'decomp_var_names',
            'decomp_optimizations', 'decomp_unreach_funcs',
            'decomp_emit_addresses', 'architecture', 'file_format',
            'comp_compiler', 'comp_debug', 'comp_strip',
            'comp_optimizations',
        ):
            self._add_param_when_given(name, params, kwargs)
        # Parameters that need normalization before being sent.
        self._add_sel_decomp_funcs_param_when_given(params, kwargs)
        self._add_sel_decomp_ranges_param_when_given(params, kwargs)
        self._add_param_when_given('sel_decomp_decoding', params, kwargs)
        self._add_endian_param_when_given(params, kwargs)
        for name in (
            'raw_entry_point', 'raw_section_vma', 'ar_index', 'ar_name',
            'generate_archive', 'generate_cg', 'generate_cfgs',
        ):
            self._add_param_when_given(name, params, kwargs)

        response = conn.send_post_request(files=files, params=params)
        return response['id']

    def _get_input_file(self, kwargs):
        """Returns the input file to be decompiled, or raises
        :class:`MissingParameterError` when it was not given.
        """
        try:
            input_file = kwargs['input_file']
        except KeyError:
            raise MissingParameterError('input_file')
        return File(input_file)

    def _add_pdb_file_when_given(self, files, kwargs):
        """Adds the optional PDB file to `files` under the ``pdb`` key."""
        pdb_file = kwargs.get('pdb_file')
        if pdb_file is None:
            return
        files['pdb'] = File(pdb_file)

    def _get_mode_param(self, input_file, kwargs):
        """Returns the decompilation mode, validated against the choices."""
        default_mode = self._get_default_mode(input_file)
        return self._get_param(
            'mode',
            kwargs,
            choices={'c', 'bin', 'raw'},
            default=default_mode
        )

    def _get_default_mode(self, input_file):
        """Guesses the mode from the input file name: C source vs. binary."""
        if input_file.name.lower().endswith('.c'):
            return 'c'
        return 'bin'

    def _add_sel_decomp_funcs_param_when_given(self, params, kwargs):
        """Normalizes ``sel_decomp_funcs`` (iterable or string) to a string."""
        funcs = kwargs.get('sel_decomp_funcs')
        if funcs is None:
            return
        if not isinstance(funcs, str):
            funcs = ','.join(funcs)
        params['sel_decomp_funcs'] = funcs

    def _add_sel_decomp_ranges_param_when_given(self, params, kwargs):
        """Normalizes ``sel_decomp_ranges`` into ``'start-end,...'`` form."""
        ranges = kwargs.get('sel_decomp_ranges')
        if ranges is None:
            return
        if isinstance(ranges, str):
            params['sel_decomp_ranges'] = ranges
            return

        def fmt_address(address):
            # Integers are rendered in hexadecimal, everything else verbatim.
            if isinstance(address, int):
                return hex(address)
            return str(address)

        def fmt_range(range):
            if isinstance(range, tuple):
                assert len(range) == 2, 'invalid range: {}'.format(range)
                return '{}-{}'.format(
                    fmt_address(range[0]), fmt_address(range[1])
                )
            return str(range)

        params['sel_decomp_ranges'] = ','.join(
            fmt_range(range) for range in ranges
        )

    def _add_endian_param_when_given(self, params, kwargs):
        """Adds ``endian``, also accepting the pre-2.2 name ``raw_endian``."""
        endian = kwargs.get('endian', kwargs.get('raw_endian'))
        if endian is not None:
            params['endian'] = endian

    def __repr__(self):
        qualified_name = __name__ + '.' + self.__class__.__name__
        return '<{} api_url={!r}>'.format(qualified_name, self.api_url)
from retdec.exceptions import ArchiveGenerationFailedError
from retdec.exceptions import CFGGenerationFailedError
from retdec.exceptions import CGGenerationFailedError
from retdec.exceptions import DecompilationFailedError
from retdec.exceptions import NoSuchCFGError
from retdec.exceptions import OutputNotRequestedError
from retdec.resource import Resource
class DecompilationPhase:
    """A single phase of a decompilation.

    :param str name: Name of the phase.
    :param str part: Part into which the phase belongs (may be ``None``).
    :param str description: Description of the phase.
    :param int completion: Percentage (0-100) of the decompilation done.
    :param list warnings: Decompiler warnings produced in this phase
        (strings).
    """

    def __init__(self, name, part, description, completion, warnings):
        # Store everything under the private attribute names the read-only
        # properties below expose.
        self.__dict__.update(
            _name=name,
            _part=part,
            _description=description,
            _completion=completion,
            _warnings=warnings,
        )

    @property
    def name(self):
        """Name of the phase (`str`)."""
        return self._name

    @property
    def part(self):
        """Part to which the phase belongs (`str`), or ``None``."""
        return self._part

    @property
    def description(self):
        """Description of the phase (`str`)."""
        return self._description

    @property
    def completion(self):
        """Completion (in percentages, ``0-100``)."""
        return self._completion

    @property
    def warnings(self):
        """Warnings (strings) produced by the decompiler in this phase."""
        return self._warnings

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return ('{}.{}(name={!r}, part={!r}, description={!r},'
                ' completion={}, warnings={!r})').format(
            __name__,
            self.__class__.__name__,
            self.name,
            self.part,
            self.description,
            self.completion,
            self.warnings,
        )
class Decompilation(Resource):
"""A representation of a decompilation."""
def get_completion(self):
"""How much of the decompilation has been completed (in percentage)?
It is an ``int`` between 0 and 100.
"""
self._update_state_if_needed()
return self._completion
def get_phases(self):
"""Obtains and returns the list of phases
(:class:`~retdec.decompilation.DecompilationPhase`).
"""
self._update_state_if_needed()
return self._phases
    def wait_until_finished(self, callback=None,
                            on_failure=DecompilationFailedError):
        """Waits until the decompilation is finished.

        :param callable callback: Function to be called when the status of the
            decompilation is changed or when it finishes.
        :param callable on_failure: What should be done when the decompilation
            fails?

        If `callback` is not ``None``, it is called with the decompilation as
        its argument when the status of the decompilation is changed or when it
        finishes.

        If `on_failure` is ``None``, nothing is done when the decompilation
        fails. Otherwise, it is called with the error message. If the returned
        value is an exception, it is raised.
        """
        # Ensure that we have something callable (do nothing by default).
        callback = callback or (lambda _: None)
        # Currently, the retdec.com API does not support push notifications, so
        # we have to do polling.
        # Track completion changes so we can call the callback when the status
        # changes. Note that the first loop iteration only records the initial
        # completion; the callback fires on subsequent *changes*.
        last_completion = None
        while not self.has_finished():
            if (last_completion is not None and
                    self._completion != last_completion):
                callback(self)
            last_completion = self._completion
            # Sleep for the state-update interval before polling again.
            self._wait_until_state_can_be_updated()
        # The decompilation has finished.
        # Call the callback one final time. This has to be done because the
        # decompilation may have immediately finished, without giving us chance
        # to call the callback.
        callback(self)
        if self._failed:
            self._handle_failure(on_failure, self._error)
def get_hll_code(self):
"""Obtains and returns the decompiled code in the high-level language
(`str`).
"""
return self._get_file_contents(
self._path_to_output_file('hll'),
is_text_file=True
)
def save_hll_code(self, directory=None):
"""Saves the decompiled code in the high-level language to the given
directory.
:param str directory: Path to a directory in which the decompiled code
will be stored.
:returns: Path to the saved file (`str`).
If `directory` is ``None``, the current working directory is used.
"""
return self._get_file_and_save_it(
self._path_to_output_file('hll'),
directory
)
def get_dsm_code(self):
"""Obtains and returns the disassembled input file in assembly-like
syntax (`str`).
"""
return self._get_file_contents(
self._path_to_output_file('dsm'),
is_text_file=True
)
def save_dsm_code(self, directory=None):
"""Saves the disassembled input file in assembly-like syntax to the
given directory.
:param str directory: Path to a directory in which the file will be
stored.
:returns: Path to the saved file (`str`).
If `directory` is ``None``, the current working directory is used.
"""
return self._get_file_and_save_it(
self._path_to_output_file('dsm'),
directory
)
def cg_generation_has_finished(self):
"""Checks if the call-graph generation has finished.
:raises OutputNotRequestedError: When the call graph was not requested
to be generated.
"""
self._update_state_if_needed()
return self._cg_status.finished
def cg_generation_has_succeeded(self):
"""Checks if the call-graph generation has succeeded.
:raises OutputNotRequestedError: When the call graph was not requested
to be generated.
"""
self._update_state_if_needed()
return self._cg_status.generated
def cg_generation_has_failed(self):
"""Checks if the call graph has failed to generate.
:raises OutputNotRequestedError: When the call graph was not requested
to be generated.
"""
self._update_state_if_needed()
return self._cg_status.failed
def get_cg_generation_error(self):
"""Returns the reason why the call graph failed to generate.
:raises OutputNotRequestedError: When the call graph was not requested
to be generated.
If the call-graph generation has not failed, it returns ``None``.
"""
self._update_state_if_needed()
return self._cg_status.error
def wait_until_cg_is_generated(
self, on_failure=CGGenerationFailedError):
"""Waits until the call graph is generated.
:param callable on_failure: What should be done when the generation
fails?
:raises OutputNotRequestedError: When the call graph was not requested
to be generated.
If `on_failure` is ``None``, nothing is done when the generation fails.
Otherwise, it is called with the error message. If the returned value
is an exception, it is raised.
"""
# Currently, the retdec.com API does not support push notifications, so
# we have to do polling.
while not self.cg_generation_has_finished():
self._wait_until_state_can_be_updated()
if self._cg_status.failed:
self._handle_failure(on_failure, self._cg_status.error)
def save_cg(self, directory=None):
"""Saves the call graph to the given directory.
:param str directory: Path to a directory in which the file will be
stored.
:returns: Path to the saved file (`str`).
If `directory` is ``None``, the current working directory is used.
"""
return self._get_file_and_save_it(
self._path_to_output_file('cg'),
directory
)
@property
def funcs_with_cfg(self):
"""A list of names of functions having a control-flow graph.
The returned list does not depend on the control-flow-graph-generation
status. It always returns the same function names, disregarding whether
their control-flow graph has or has not been generated.
The returned list is ordered by function names.
:raises OutputNotRequestedError: When control-flow graphs were not
requested to be generated.
"""
self._update_state_if_needed()
return sorted(self._cfg_statuses.keys())
def cfg_generation_has_finished(self, func):
"""Checks if the generation of a control-flow graph for the given
function has finished.
:param str func: Name of the function.
:raises OutputNotRequestedError: When control-flow graphs were not
requested to be generated.
:raises NoSuchCFGError: When there is no control-flow graph for the
given function.
"""
self._update_state_if_needed()
return self._cfg_statuses[func].finished
def cfg_generation_has_succeeded(self, func):
"""Checks if the generation of a control-flow graph for the given
function has succeeded.
:param str func: Name of the function.
:raises OutputNotRequestedError: When control-flow graphs were not
requested to be generated.
:raises NoSuchCFGError: When there is no control-flow graph for the
given function.
"""
self._update_state_if_needed()
return self._cfg_statuses[func].generated
def cfg_generation_has_failed(self, func):
"""Checks if the generation of a control-flow graph for the given
function has failed.
:param str func: Name of the function.
:raises OutputNotRequestedError: When control-flow graphs were not
requested to be generated.
:raises NoSuchCFGError: When there is no control-flow graph for the
given function.
"""
self._update_state_if_needed()
return self._cfg_statuses[func].failed
def get_cfg_generation_error(self, func):
"""Returns the reason why the control-flow graph for the given function
failed to generate.
:param str func: Name of the function.
:raises OutputNotRequestedError: When control-flow graphs were not
requested to be generated.
:raises NoSuchCFGError: When there is no control-flow graph for the
given function.
If the control-flow-graph generation has not failed, it returns
``None``.
"""
self._update_state_if_needed()
return self._cfg_statuses[func].error
def wait_until_cfg_is_generated(
self, func, on_failure=CFGGenerationFailedError):
"""Waits until the control-flow graph for the given function is
generated.
:param str func: Name of the function.
:param callable on_failure: What should be done when the generation
fails?
:raises OutputNotRequestedError: When control-flow graphs were not
requested to be generated.
:raises NoSuchCFGError: When there is no control-flow graph for the
given function.
If `on_failure` is ``None``, nothing is done when the generation fails.
Otherwise, it is called with the error message. If the returned value
is an exception, it is raised.
"""
# Currently, the retdec.com API does not support push notifications, so
# we have to do polling.
while not self.cfg_generation_has_finished(func):
self._wait_until_state_can_be_updated()
if self._cfg_statuses[func].failed:
self._handle_failure(on_failure, self._cfg_statuses[func].error)
def save_cfg(self, func, directory=None):
"""Saves the control-flow graph for the given function to the given
directory.
:param str func: Name of the function.
:param str directory: Path to a directory in which the file will be
stored.
:returns: Path to the saved file (`str`).
If `directory` is ``None``, the current working directory is used.
"""
return self._get_file_and_save_it(
self._path_to_output_file('cfgs/{}'.format(func)),
directory
)
def archive_generation_has_finished(self):
"""Checks if the archive generation has finished.
:raises OutputNotRequestedError: When the archive was not requested to
be generated.
"""
self._update_state_if_needed()
return self._archive_status.finished
def archive_generation_has_succeeded(self):
"""Checks if the archive generation has succeeded.
:raises OutputNotRequestedError: When the archive was not requested to
be generated.
"""
self._update_state_if_needed()
return self._archive_status.generated
def archive_generation_has_failed(self):
"""Checks if the archive has failed to generate.
:raises OutputNotRequestedError: When the archive was not requested to
be generated.
"""
self._update_state_if_needed()
return self._archive_status.failed
def get_archive_generation_error(self):
"""Returns the reason why the archive failed to generate.
:raises OutputNotRequestedError: When the archive was not requested to
be generated.
If the archive has not failed, it returns ``None``.
"""
self._update_state_if_needed()
return self._archive_status.error
def wait_until_archive_is_generated(
self, on_failure=ArchiveGenerationFailedError):
"""Waits until the archive containing all outputs from the
decompilation is generated.
:param callable on_failure: What should be done when the generation
fails?
:raises OutputNotRequestedError: When the archive was not requested to
be generated.
If `on_failure` is ``None``, nothing is done when the generation fails.
Otherwise, it is called with the error message. If the returned value
is an exception, it is raised.
"""
# Currently, the retdec.com API does not support push notifications, so
# we have to do polling.
while not self.archive_generation_has_finished():
self._wait_until_state_can_be_updated()
if self._archive_status.failed:
self._handle_failure(on_failure, self._archive_status.error)
def save_archive(self, directory=None):
"""Saves the archive containing all outputs from the decompilation
to the given directory.
:param str directory: Path to a directory in which the file will be
stored.
:returns: Path to the saved file (`str`).
If `directory` is ``None``, the current working directory is used.
"""
return self._get_file_and_save_it(
self._path_to_output_file('archive'),
directory
)
def save_binary(self, directory=None):
"""Saves the compiled version of the input C file (provided that the
input was a C file) to the given directory.
:param str directory: Path to a directory in which the file will be
stored.
:returns: Path to the saved file (`str`).
If `directory` is ``None``, the current working directory is used.
"""
return self._get_file_and_save_it(
self._path_to_output_file('binary'),
directory
)
    def _update_state(self):
        """Updates the state of the decompilation."""
        # Fetch the fresh status from the base class, then refresh every
        # cached piece of decompilation-specific state from it.
        status = super()._update_state()
        self._completion = status['completion']
        self._phases = self._phases_from_status(status)
        self._cg_status = self._cg_status_from_status(status)
        self._cfg_statuses = self._cfg_statuses_from_status(status)
        self._archive_status = self._archive_status_from_status(status)
        return status
def _phases_from_status(self, status):
"""Creates a list of phases from the given status."""
return [
DecompilationPhase(
phase['name'],
phase['part'],
phase['description'],
phase['completion'],
phase['warnings']
) for phase in status['phases']
]
def _cg_status_from_status(self, status):
"""Returns the call-graph generation status from the given status."""
if 'cg' not in status:
return _NotRequestedOutputStatus()
return _OutputGenerationStatus(**status['cg'])
def _cfg_statuses_from_status(self, status):
"""Returns the control-flow-graph generation statuses from the given
status.
"""
if 'cfgs' not in status:
return _DictRaisingOutputNotRequestedError()
return _DictRaisingErrorWhenNoSuchCFG({
func: _OutputGenerationStatus(**status)
for func, status in status['cfgs'].items()
})
def _archive_status_from_status(self, status):
"""Returns the archive generation status from the given status."""
if 'archive' not in status:
return _NotRequestedOutputStatus()
return _OutputGenerationStatus(**status['archive'])
def _path_to_output_file(self, output_file):
"""Returns a path to the given output file."""
return '/{}/outputs/{}'.format(self.id, output_file)
def __repr__(self):
return '<{} id={!r}>'.format(
__name__ + '.' + self.__class__.__name__,
self.id
)
class _OutputGenerationStatus:
"""A status of output generation.
:param bool generated: Has the output been generated?
:param bool failed: Has the generation failed?
:param str error: Reason why the generation failed.
"""
def __init__(self, generated, failed, error):
self._generated = generated
self._failed = failed
self._error = error
@property
def generated(self):
"""Has the output been generated?"""
return self._generated
@property
def failed(self):
"""Has the generation failed?"""
return self._failed
@property
def error(self):
"""Reason why the generation failed (`str`)."""
return self._error
@property
def finished(self):
"""Has the output generation finished?"""
return self.generated or self.failed
class _NotRequestedOutputStatus:
    """Stand-in status for an output that was never requested.

    Querying any attribute raises
    :class:`~retdec.exceptions.OutputNotRequestedError`.
    """

    def _raise_not_requested(self):
        raise OutputNotRequestedError

    generated = property(_raise_not_requested)
    failed = property(_raise_not_requested)
    error = property(_raise_not_requested)
    finished = property(_raise_not_requested)
class _DictRaisingOutputNotRequestedError(dict):
    """A dict standing in for CFG statuses when control-flow graphs were
    not requested; key access and :meth:`keys` raise
    :class:`~retdec.exceptions.OutputNotRequestedError`.
    """

    def keys(self):
        raise OutputNotRequestedError()

    def __getitem__(self, key):
        raise OutputNotRequestedError()
class _DictRaisingErrorWhenNoSuchCFG(dict):
    """A dictionary that raises :class:`~retdec.exceptions.NoSuchCFGError` when
    a key (i.e. a function) is missing.
    """
    # dict.__getitem__() calls __missing__() only for absent keys, so
    # lookups of known functions behave like a plain dict.
    def __missing__(self, func):
        raise NoSuchCFGError(func) | /retdec-python-0.5.2.tar.gz/retdec-python-0.5.2/retdec/decompilation.py | 0.888982 | 0.331958 | decompilation.py | pypi |
class RetdecError(Exception):
    """Root of the exception hierarchy raised by this library."""
class MissingAPIKeyError(RetdecError):
    """Raised when no API key is available."""

    def __init__(self):
        message = (
            'No explicit API key given'
            ' and environment variable RETDEC_API_KEY is not set.'
        )
        super().__init__(message)
class MissingParameterError(RetdecError):
    """Raised when a required parameter is not set.

    :param str name: Name of the missing parameter.
    """

    def __init__(self, name):
        message = "Missing parameter '{}'.".format(name)
        super().__init__(message)
class InvalidValueError(RetdecError):
    """Raised when a parameter has an invalid value.

    :param str name: Name of the parameter whose value is invalid.
    :param value: The invalid value.
    """

    def __init__(self, name, value):
        message = "Value '{}' of parameter '{}' is invalid.".format(value, name)
        super().__init__(message)
class AuthenticationError(RetdecError):
    """Raised when authentication with the provided API key fails."""

    def __init__(self):
        message = (
            'Authentication with the given API key failed'
            ' (is the key valid?).'
        )
        super().__init__(message)
class ConnectionError(RetdecError):
    """Raised when there is a connection error.

    NOTE(review): this name shadows the builtin :class:`ConnectionError`
    within this module; renaming would break the public API, so it is kept.
    """
class AnalysisFailedError(RetdecError):
    """Raised when a fileinfo analysis has failed."""
class DecompilationFailedError(RetdecError):
    """Raised when a decompilation has failed."""
class OutputNotRequestedError(RetdecError):
    """Raised when querying an output that was never requested to be
    generated.
    """

    def __init__(self):
        message = 'The output was not requested to be generated.'
        super().__init__(message)
class CGGenerationFailedError(RetdecError):
    """Raised when the generation of a call graph fails."""
class CFGGenerationFailedError(RetdecError):
    """Raised when the generation of a control-flow graph fails."""
class NoSuchCFGError(RetdecError):
    """Raised when a control-flow graph is requested for a non-existing
    function.

    :param str func: Name of the function whose control-flow graph was
        requested.
    """

    def __init__(self, func):
        message = "There is no control-flow graph for '{}'.".format(func)
        super().__init__(message)
class ArchiveGenerationFailedError(RetdecError):
    """Raised when the generation of an archive fails."""
class UnknownAPIError(RetdecError):
    """Exception raised when there is an unknown API error.

    :param int code: Error code.
    :param str message: Short message describing what went wrong.
    :param str description: Longer description of what went wrong.
    """
    def __init__(self, code, message, description):
        # The long description doubles as the exception's str().
        super().__init__(description)
        self._code = code
        self._message = message
        self._description = description
    @property
    def code(self):
        """Error code (`int`)."""
        return self._code
    @property
    def message(self):
        """Short message describing what went wrong (`str`)."""
        return self._message
    @property
    def description(self):
        """Longer description of what went wrong (`str`)."""
        return self._description | /retdec-python-0.5.2.tar.gz/retdec-python-0.5.2/retdec/exceptions.py | 0.929919 | 0.317969 | exceptions.py | pypi |
from abc import ABC
from datetime import datetime as DateTime, timedelta as TimeDelta
# Epoch from which all integer period indices in this module are counted.
_reference_date = DateTime(1970, 1, 1)
def _year(time_stamp: DateTime) -> int:
    """Number of whole calendar years between the epoch and *time_stamp*."""
    return time_stamp.year - _reference_date.year
def _day(time_stamp: DateTime) -> int:
    """Number of whole days between the epoch and *time_stamp*."""
    delta = time_stamp - _reference_date
    return int(delta.days)
class Period(ABC):
    """Abstract bidirectional mapping between timestamps and integer
    period indices.
    """

    def to_period(self, time_stamp: DateTime) -> int:
        """Convert *time_stamp* into the integer index of the period of
        time it falls in.
        """
        raise NotImplementedError()

    def period_start(self, period: int) -> DateTime:
        """Return the start time of the period with index *period*."""
        raise NotImplementedError()

    def max_duration(self) -> TimeDelta:
        """Return an upper bound on the duration of any single period."""
        raise NotImplementedError()
class SubdividedPeriod(Period):
    """Splits every period of an underlying :class:`Period` into a fixed
    number of equal-length sub-periods.
    """

    def __init__(self, sub_period: Period, subdivisions: int):
        self._sub_period = sub_period
        self._sub_div = subdivisions

    def to_period(self, time_stamp: DateTime) -> int:
        # Locate the outer period, then the fraction of it that has elapsed.
        outer = self._sub_period.to_period(time_stamp)
        outer_start = self._sub_period.period_start(outer)
        outer_end = self._sub_period.period_start(outer + 1)
        fraction = (time_stamp - outer_start) / (outer_end - outer_start)
        return outer * self._sub_div + int(fraction * self._sub_div)

    def period_start(self, period: int) -> DateTime:
        # Recover the outer period index and the sub-period offset in it.
        outer, offset = divmod(period, self._sub_div)
        outer_start = self._sub_period.period_start(outer)
        outer_end = self._sub_period.period_start(outer + 1)
        return outer_start + (outer_end - outer_start) * offset / self._sub_div

    def max_duration(self) -> TimeDelta:
        return self._sub_period.max_duration() / self._sub_div

    def __repr__(self):
        return f"{self._sub_period}/{self._sub_div}"
class Year(Period):
    """Calendar-year periods; index 0 is the epoch year (1970)."""

    def to_period(self, time_stamp: DateTime) -> int:
        return _year(time_stamp)

    def period_start(self, period: int) -> DateTime:
        return DateTime(_reference_date.year + period, 1, 1)

    def max_duration(self) -> TimeDelta:
        # Leap years contain 366 days.
        return TimeDelta(days=366)

    def __repr__(self):
        return "Year"
class Month(Period):
    """Calendar-month periods; index 1 is January of the epoch year."""

    def to_period(self, time_stamp: DateTime) -> int:
        return _year(time_stamp) * 12 + time_stamp.month

    def period_start(self, period: int) -> DateTime:
        year_offset, month = divmod(period, 12)
        year = _reference_date.year + year_offset
        if month == 0:
            # "Month 0" of a year is really December of the previous year.
            month = 12
            year -= 1
        return DateTime(year, month, 1)

    def max_duration(self) -> TimeDelta:
        return TimeDelta(days=31)

    def __repr__(self):
        return "Month"
class Week(Period):
    """Whole-week periods counted from the epoch."""
    def to_period(self, time_stamp: DateTime) -> int:
        # This is easier than dealing with the years that have 53 weeks
        # Subtracting the epoch's ISO weekday aligns week boundaries with
        # the same weekday offset used by period_start() below.
        days = _day(time_stamp) - _reference_date.isocalendar()[2]
        return days // 7
    def max_duration(self) -> TimeDelta:
        return TimeDelta(days=7)
    def period_start(self, period: int) -> DateTime:
        return _reference_date + TimeDelta(days=(period * 7) + _reference_date.isocalendar()[2])
    def __repr__(self):
        return "Week"
class Day(Period):
    """Whole-day periods counted from the epoch."""

    def to_period(self, time_stamp: DateTime) -> int:
        return _day(time_stamp)

    def period_start(self, period: int) -> DateTime:
        return _reference_date + TimeDelta(days=period)

    def max_duration(self) -> TimeDelta:
        return TimeDelta(hours=24)

    def __repr__(self):
        return "Day"
class Hour(Period):
    """Whole-hour periods counted from the epoch."""

    def to_period(self, time_stamp: DateTime) -> int:
        return _day(time_stamp) * 24 + time_stamp.hour

    def period_start(self, period: int) -> DateTime:
        return _reference_date + TimeDelta(hours=period)

    def max_duration(self) -> TimeDelta:
        return TimeDelta(minutes=60)

    def __repr__(self):
        return "Hour"
class Minute(Period):
    """Whole-minute periods counted from the epoch."""
    def to_period(self, time_stamp: DateTime) -> int:
        # Minutes elapsed since the epoch: days -> hours -> minutes.
        return (_day(time_stamp) * 24 + time_stamp.hour) * 60 + time_stamp.minute
    def max_duration(self) -> TimeDelta:
        return TimeDelta(seconds=60)
    def period_start(self, period: int) -> DateTime:
        return _reference_date + TimeDelta(minutes=period)
    def __repr__(self):
        return "Minute" | /retention_rules-0.1.1-py3-none-any.whl/retention_rules/periods.py | 0.928124 | 0.564639 | periods.py | pypi |
from dataclasses import dataclass
from enum import Enum
from .periods import Period
from datetime import datetime as DateTime
from typing import List, Optional, Any, Callable
class RetainStrategy(Enum):
    """Which item to keep when several fall into one retain-every period."""

    OLDEST = "oldest"
    NEWEST = "newest"
@dataclass
class PolicyRule:
    # Period type defining the window this rule applies over (e.g. Month).
    applies_for: Period
    # How many applies_for periods, counting back from "now", are covered.
    applies_period_count: int
    # Within the covered window, one item per retain_every period is kept.
    retain_every: Period
    # Optional human-readable description of the rule.
    note: Optional[str] = None
@dataclass
class _Result:
    # NOTE(review): this dataclass appears unused within this module
    # (check_retention builds RetentionResult instead) — verify callers
    # before removing.
    index: int
    retain: bool
    time: DateTime
@dataclass
class RetentionResult:
    # Timestamp extracted from the item via the key function.
    time: DateTime
    # Whether the policy decided to keep this item.
    retain: bool
    # The original item passed to check_retention().
    item: Any
    # The rule that caused retention, or None when not retained.
    rule: Optional[PolicyRule]
class RetentionPolicy:
    """Applies an ordered set of PolicyRule objects to timestamped items,
    deciding which items should be retained and which may be discarded.
    """

    def __init__(self, **kwargs):
        """
        :param rules: A list of PolicyRule objects
        :param retain_strategy: The strategy to use to decide which items to retain. When a retain-every period has more
            than one item in it (for example, in a retain-every hour rule there may be multiple items from a single
            hour, and we only want to retain one of the) this determines which gets kept.
        :param reuse_in_group: If a retain-every period already has an item marked to be retained from a previous rule,
            should we re-use that item for the current rule? If True, we will skip the retain_strategy logic and move
            on, since we already know that at least one item in this period will be retained. If False, we will use
            the retain_strategy to determine which gets kept, which may or may not end up being the same as the item
            already marked to keep.
        """
        self.rules: List[PolicyRule] = kwargs.get("rules", [])
        self.retain_strategy: RetainStrategy = kwargs.get("retain_strategy", RetainStrategy.OLDEST)
        self.reuse_in_group: bool = kwargs.get("reuse_in_group", False)
        self._update()

    def _update(self):
        # Keep rules ordered by the total window they cover (shortest first).
        self.rules.sort(key=lambda l: l.applies_for.max_duration() * l.applies_period_count)

    def add_rule(self, applies_for: Period, applies_period_count: int, retain_every: Period,
                 note: Optional[str] = None):
        """ Adds a rule to the policy. """
        self.rules.append(PolicyRule(applies_for, applies_period_count, retain_every, note))
        self._update()

    def check_retention(self, items: List[Any],
                        key: Optional[Callable[[Any], DateTime]] = None,
                        now: Optional[DateTime] = None) -> List[RetentionResult]:
        """ Checks if the given time_stamps are to be retained according to the policy.

        :param items: Items to evaluate; `key` extracts a datetime from each.
        :param key: Optional callable mapping an item to its datetime;
            defaults to the identity function.
        :param now: Reference time for the rule windows; defaults to
            DateTime.now().
        :returns: One RetentionResult per input item, in input order.
        """
        now = now or DateTime.now()
        results = _prepare_working(items, key)

        # We will iterate through each layer and mutate the results as we go. Layers will be able to mark a result as
        # to be retained, but will not un-mark something which has already been marked.
        for rule in self.rules:
            # Get the integer of the current rule's applies-to period, then calculate the smallest period integer which
            # the current rule will apply to.
            this_period = rule.applies_for.to_period(now)
            applies_period = this_period - rule.applies_period_count + 1

            # Find the items which are within the same applies-to period range as the current time
            applicable_items = [x for x in results if rule.applies_for.to_period(x.time) >= applies_period]

            # Now we will group the applicable items by the retain-every period that they fall in
            grouped = _group_items_by_period(applicable_items, rule.retain_every)

            # Now we will iterate through the groups and determine which item should be retained
            for item_group in grouped:
                if self.reuse_in_group and any(x.retain for x in item_group):
                    # If we are re-using items in a group, we will first check if any of the items are already marked
                    # to be retained. If so, we can safely skip doing anything to this group
                    continue

                if self.retain_strategy == RetainStrategy.OLDEST:
                    # If we are retaining the oldest (first) item, we will mark the minimum time-stamp item
                    to_retain = min(item_group, key=lambda x: x.time)
                    if not to_retain.retain:
                        to_retain.retain = True
                        to_retain.rule = rule
                elif self.retain_strategy == RetainStrategy.NEWEST:
                    # If we are retaining the newest (last) item, we will mark the maximum time-stamp item
                    to_retain = max(item_group, key=lambda x: x.time)
                    if not to_retain.retain:
                        to_retain.retain = True
                        to_retain.rule = rule

        # Finally, we will return the results
        return results
def _group_items_by_period(items: List[RetentionResult], period: Period) -> List[List[RetentionResult]]:
    """ Groups the given items by the period index their timestamp falls in.

    Groups come back in first-seen order (dicts preserve insertion order),
    and each group keeps the original relative order of its items.

    :param items: Results to bucket.
    :param period: Period used to index each item's ``time``.
    :returns: A list of groups, one per distinct period index.
    """
    grouped_items = {}
    for item in items:
        bucket = period.to_period(item.time)
        # setdefault replaces the explicit "create list on first sight" branch.
        grouped_items.setdefault(bucket, []).append(item)
    return list(grouped_items.values())
def _prepare_working(items: List[Any], key: Optional[Callable[[Any], DateTime]] = None) -> List[RetentionResult]:
    """ Prepares the given items for processing by the policy. """
    # Default key: the item itself is expected to be a datetime.
    key = key or (lambda x: x)
    # Validate that we are getting datetime objects
    time_stamps = []
    for item in items:
        time_stamp = key(item)
        if not isinstance(time_stamp, DateTime):
            raise TypeError(f"Could not get a datetime from the {item} item, instead got: {type(time_stamp)}")
        time_stamps.append(time_stamp)
    # Wrap each item with retain=False; the policy rules flip retain later.
    return [RetentionResult(time_stamp, False, item, None) for time_stamp, item in zip(time_stamps, items)] | /retention_rules-0.1.1-py3-none-any.whl/retention_rules/policy.py | 0.933794 | 0.461623 | policy.py | pypi |
import re
from typing import Dict, Tuple, Callable
from .policy import RetentionPolicy, RetainStrategy
from .periods import *
class PolicyBuilder:
    """Builds a :class:`RetentionPolicy` from a plain dictionary description."""

    def __init__(self, **kwargs):
        # Mapping from unit key (e.g. "W") to the Period factory it denotes.
        self.keys: Dict[str, Callable[[], Period]] = kwargs.get("keys", _by_key)

    def build(self, policy_dict: Dict) -> RetentionPolicy:
        """Build a policy from *policy_dict*.

        :param policy_dict: Dict with optional "retain" and "reuse" keys and
            a required non-empty "rules" list; each rule has "applies_for",
            "retain_every", and an optional "note".
        :returns: The constructed RetentionPolicy.
        :raises ValueError: When no rules are given or a specifier is invalid.
        """
        retain = RetainStrategy(policy_dict.get("retain", "oldest"))
        reuse = policy_dict.get("reuse", False)
        policy = RetentionPolicy(retain_strategy=retain, reuse_in_group=reuse)

        rule_items = policy_dict.get("rules", [])
        if not rule_items:
            raise ValueError("Policy must contain at least one rule in the 'rules' key")

        for rule in rule_items:
            applies_text = rule["applies_for"]
            retain_text = rule["retain_every"]
            applies, count = self._parse_key_text(applies_text)
            retain_every, rcount = self._parse_key_text(retain_text)
            if rcount != 1:
                # Fix: was a pointless f-string with no placeholders.
                raise ValueError("Retain-every does not currently support a count")
            note = rule.get("note", None) or f"{applies_text} retain {retain_text}"
            policy.add_rule(applies, count, retain_every, note)
        return policy

    def _parse_key_text(self, text: str):
        """Parse a specifier like ``"3M"`` or ``"W/2"`` into (Period, count)."""
        match = _key_pattern.match(text)
        if not match:
            raise ValueError(f"Invalid specifier: {text}")

        count = int(match.group(1) or 1)
        unit_key = match.group(2)
        sub_div = match.group(3)

        period_cls = self.keys.get(unit_key, None)
        if not period_cls:
            raise ValueError(f"PolicyBuilder could not find a period associated with the key: {unit_key}")

        period = period_cls()
        if sub_div:
            # sub_div still carries the leading "/", e.g. "/4".
            period = SubdividedPeriod(period, int(sub_div[1:]))
        return period, count
# Specifier grammar: optional count, unit key, optional "/N" subdivision
# (e.g. "3M", "W/2", "12H").
_key_pattern = re.compile(r"^(\d+)?(\w+)(/\d+)?$")
# Unit key -> Period subclass used by PolicyBuilder by default.
_by_key = {
    "Y": Year,
    "M": Month,
    "W": Week,
    "D": Day,
    "H": Hour,
    "MIN": Minute,
} | /retention_rules-0.1.1-py3-none-any.whl/retention_rules/builder.py | 0.809427 | 0.197735 | builder.py | pypi |
import torch
class InvalidRetentionParametersException(Exception):
    """Raised when model parameters violate the architecture defined in
    the original paper: the hidden size must split evenly across the
    attention heads.
    """

    def __init__(self, hidden_size: int, number_of_heads: int):
        self.message = (
            f"hidden_size ({hidden_size}) must be divisible "
            f"by number_of_heads ({number_of_heads})"
        )
        super().__init__(self.message)
class InvalidTemperatureException(Exception):
    """Raised when a sampling temperature falls outside the valid
    interval (0, 1].
    """

    def __init__(self, temperature: float):
        self.message = (
            f"temperature ({temperature}) must be both greater "
            f"than zero and less than or equal to 1"
        )
        super().__init__(self.message)
class InvalidHiddenSizeException(Exception):
    """Raised when the hidden size passed does not match the hidden size
    the model requires.

    :param hidden_size: The hidden size that was supplied.
    :param model_required_hidden_size: The hidden size the model requires.
    """

    def __init__(self, hidden_size: int, model_required_hidden_size: int):
        # Bug fix: the original message interpolated hidden_size twice and
        # never showed model_required_hidden_size.
        self.message = (
            f"hidden_size ({hidden_size}) must be equal to "
            f"model.hidden_size ({model_required_hidden_size})"
        )
        super().__init__(self.message)
class HalfPointPrecisionException(Exception):
    """Raised when an operation is attempted with a half-precision tensor
    dtype that PyTorch does not yet support for that operation.
    """

    def __init__(self, tensor: torch.Tensor):
        self.message = f"tensor (dtype: {tensor.dtype}) is not currently supported for this operation"
        super().__init__(self.message)
class ComplexTensorException(Exception):
    """Raised when an operation is attempted with a complex tensor dtype
    that PyTorch does not yet support for that operation.
    """

    def __init__(self, tensor: torch.Tensor):
        self.message = f"tensor (dtype: {tensor.dtype}) is not currently supported for this operation"
        super().__init__(self.message)
class InvalidBatchSizeException(Exception):
    """
    Raised in the event that an operation attempts
    to use tensors that have different batch sizes (dimension 0). If
    this is raised, there is some improper tensor
    reshaping happening.
    """
    def __init__(
        self,
        tensora: torch.Tensor,
        tensorb: torch.Tensor,
    ):
        # NOTE(review): the runtime message contains a grammar slip
        # ("is does not match"); left untouched here since it is
        # program-visible text.
        self.message = f"tensora (batch_size: {tensora.shape[0]}) is does not match tensorb (batch_size: {tensorb.shape[0]})"
        super().__init__(self.message) | /retentive_network-0.1.0.tar.gz/retentive_network-0.1.0/retentive_network/exceptions.py | 0.873566 | 0.619615 | exceptions.py | pypi |
import torch
import torch.nn as nn
from retentive_network.exceptions import InvalidTemperatureException
from retentive_network.models.network import RetentiveNetwork
class RetentiveNetworkCLM(nn.Module):
"""
Huge shoutout to @Jamie-Stirling for
breaking ground here first. The code below
has been fit to match the rest of this repo
but is heavily inspired from the great work
already done here:
* https://github.com/Jamie-Stirling/RetNet
"""
    def __init__(
        self,
        number_of_layers: int,
        hidden_size: int,
        number_of_heads: int,
        feed_forward_size: int,
        vocab_size: int,
        chunk_size: int,
        half_point_precision: bool = False,
        use_complex_numbers: bool = False,
        softmax: bool = False,
    ):
        """Causal-language-model head wrapping a RetentiveNetwork: token
        embedding -> retentive backbone -> projection to vocab logits.

        NOTE(review): sample() reads self.head_size, which is never set
        here — confirm where it is expected to come from.
        """
        super(RetentiveNetworkCLM, self).__init__()
        self.number_of_layers: int = number_of_layers
        self.hidden_size: int = hidden_size
        self.number_of_heads: int = number_of_heads
        self.feed_forward_size: int = feed_forward_size
        self.vocab_size: int = vocab_size
        self.chunk_size: int = chunk_size
        self.half_point_precision: bool = half_point_precision
        self.use_complex_numbers: bool = use_complex_numbers
        self.softmax: bool = softmax

        # Real dtype chosen first; overridden below when complex numbers
        # are requested (half precision then selects the narrower complex).
        self.torch_dtype: torch.dtype = (
            torch.float16 if self.half_point_precision else torch.float32
        )
        if self.use_complex_numbers:
            self.torch_dtype: torch.dtype = (
                torch.complex32 if self.half_point_precision else torch.complex64
            )

        self.model: nn.Module = RetentiveNetwork(
            number_of_layers=self.number_of_layers,
            hidden_size=self.hidden_size,
            number_of_heads=self.number_of_heads,
            feed_forward_size=self.feed_forward_size,
            chunk_size=self.chunk_size,
            half_point_precision=self.half_point_precision,
            use_complex_numbers=self.use_complex_numbers,
        )
        self.embedding_layer: nn.Module = nn.Embedding(self.vocab_size, hidden_size)
        # Output projection scaled down by hidden_size at initialization.
        self.projection: torch.Tensor = nn.Parameter(
            torch.randn(hidden_size, self.vocab_size, dtype=self.torch_dtype)
            / hidden_size
        )
        self.softmax_layer = nn.Softmax(dim=-1)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass includes passing `x` of shape
        [batch_size, sequence_length] and passes it
        through an embedding layer to shape
        [batch_size, sequence_length, hidden_size].
        This tensor is then passed through the
        parallel pass of the RetentiveNetwork
        model before being projected into a tensor
        of shape [batch_size, self.vocab_size].

        NOTE(review): when self.softmax is True, torch.mean(dim=-1)
        collapses the last dimension, so the returned shape differs from
        the one documented above — confirm the intended contract.

        Arguments:
            x (torch.Tensor): Tensor of shape [
                batch_size,
                sequence_length
            ].

        Returns:
            torch.Tensor: Tensor of shape [
                batch_size,
                self.vocab_size
            ].
        """

        x: torch.Tensor = self.embedding_layer(x)
        x: torch.Tensor = self.model(x)
        # Project hidden states onto the vocabulary; match dtypes first.
        x: torch.Tensor = torch.matmul(x, self.projection.to(x.dtype))
        # Drop the imaginary part when complex dtypes are in use.
        x: torch.Tensor = x.real

        if self.softmax:
            x = self.softmax_layer(x)
            x = torch.mean(x, dim=-1)
        return x
    def forward_recurrent(self, x, previous_Ses, n):
        """
        Forward pass includes passing `x` of shape
        [batch_size, sequence_length] and passes it
        through an embedding layer to shape
        [batch_size, sequence_length, hidden_size].
        This tensor is then passed through the
        recurrent pass of the RetentiveNetwork
        model before being projected into a tensor
        of shape [batch_size, self.vocab_size].

        NOTE(review): when self.softmax is True, torch.mean(dim=-1)
        collapses the last dimension, so the returned shape differs from
        the one documented above — confirm the intended contract.

        Arguments:
            x (torch.Tensor): Tensor of shape [
                batch_size,
                sequence_length
            ].
            previous_Ses (list): List of floats containing previous S values.
            n (int): The current nth iteration.

        Returns:
            torch.Tensor: Tensor of shape [
                batch_size,
                self.vocab_size
            ].
        """

        x: torch.Tensor = self.embedding_layer(x)
        # The recurrent pass also yields updated S states for the next step.
        x, ses = self.model.forward_recurrent(x, previous_Ses, n)
        x: torch.Tensor = torch.matmul(x, self.projection.to(x.dtype))
        # Drop the imaginary part when complex dtypes are in use.
        x: torch.Tensor = x.real

        if self.softmax:
            x = self.softmax_layer(x)
            x = torch.mean(x, dim=-1)
        return x, ses
def forward_chunkwise(self, x: torch.Tensor, state: torch.Tensor = None):
"""
Chunkwise forward pass includes passing `x`
of shape [batch_size, sequence_length] and
passes it through an embedding layer to shape
[batch_size, sequence_length, hidden_size].
This tensor is then passed through the
recurrent pass of the RetentiveNetwork
model before being projected into a tensor
of shape [batch_size, self.vocab_size].
Arguments:
x (torch.Tensor): Tensor of shape [
batch_size,
sequence_length
].
previous_Ses (list): List of floats containing previous S values.
n (int): The current nth iteration.
Returns:
torch.Tensor: Tensor of shape [
batch_size,
self.vocab_size
].
"""
x: torch.Tensor = self.embedding_layer(x)
x, state = self.model.forward_chunkwise(x, state)
x: torch.Tensor = torch.matmul(x, self.projection.to(x.dtype))
x: torch.Tensor = x.real
if self.softmax:
x = self.softmax_layer(x)
x = torch.mean(x, dim=-1)
return x, state
    def sample(
        self,
        x: torch.Tensor,
        sample_length: int,
        temperature: float = 1.0,
        number_of_samples: int = 1,
    ):
        """
        Uses the recurrent pass of the Retentive Network
        model to generate output id sequences given
        a Tensor of shape [batch_size, sequence_length].

        The prompt `x` is first fed token-by-token through the recurrent
        pass to build up the per-layer/per-head S states, then
        `number_of_samples` continuations of `sample_length` tokens are
        drawn autoregressively.

        Arguments:
            x (torch.Tensor): Tensor of shape [
                batch_size,
                sequence_length
            ].
            sample_length (int): How long in tokens the sequence returned should be.
            temperature (float): (0.0, 1.0] Controls the "randomness" or "creativity"
                of the model.
            number_of_samples (int): How many samples to generate from `x`.

        Returns:
            torch.Tensor: Tensor of shape
                [batch_size, `number_of_samples`, `sample_length`].
        """
        # InvalidTemperatureException comes from the package's exceptions
        # module; temperature must lie in (0.0, 1.0].
        if temperature <= 0 or temperature > 1:
            raise InvalidTemperatureException(temperature)
        batch_size, sequence_length = x.shape
        # Zero-initialized S states: one per head, per layer.
        previous_Ses = [
            [
                torch.zeros(batch_size, self.head_size, self.head_size)
                for _ in range(self.number_of_heads)
            ]
            for _ in range(self.number_of_layers)
        ]
        # Prime the recurrent states on the prompt; X holds the output of
        # the final prompt step.
        for idx in range(sequence_length):
            X, previous_Ses = self.forward_recurrent(x[:, idx], previous_Ses, idx + 1)
        # Draw the first token of every sample from the final prompt output.
        original_x = self._multinomial_probability_distribution(
            x=X, temperature=temperature, number_of_samples=number_of_samples
        )
        samples = []
        for sample_idx in range(number_of_samples):
            next_char = original_x[:, sample_idx]
            output_ids = []
            # NOTE(review): previous_Ses carries over between samples rather
            # than being reset to the post-prompt states — confirm intended.
            for idx in range(sample_length):
                x, previous_Ses = self.forward_recurrent(
                    next_char, previous_Ses, idx + 1
                )
                x = self._multinomial_probability_distribution(
                    x, temperature=temperature, number_of_samples=1
                )
                next_char = x[:, -1]
                output_ids.append(next_char)
            # [batch_size, sample_length] for this sample.
            output_ids = torch.stack(output_ids, dim=1)
            samples.append(output_ids)
        # [batch_size, number_of_samples, sample_length].
        samples = torch.stack(samples, dim=1)
        return samples
def _multinomial_probability_distribution(
self, x: torch.Tensor, temperature: float = 1.0, number_of_samples: int = 1
) -> torch.Tensor:
"""
Helper method that converts x to a real tensor if it's complex
then applies the temperature before a softmax layer and
finally a multinomial probability distribution.
Arguments:
x (torch.Tensor): Tensor of shape [batch_size, self.vocab_size].
temperature (float): (0.0, 1.0] Controls the "randomness" or "creativity"
of the model.
number_of_samples (int): Number of samples to sample from the
multinomial probability distribution.
Returns:
torch.Tensor: Tensor of shape [batch_size, number_of_samples]
"""
x = x.real if torch.is_complex(x) else x
x /= temperature
x = self.softmax_layer(x)
x = torch.multinomial(x, num_samples=number_of_samples)
return x
    @property
    def head_size(self):
        # Size of one retention head, read from the first retention layer of
        # the wrapped RetentiveNetwork model (all layers share this size).
        return self.model.retention_layers[0].head_size
if __name__ == "__main__":
    # Smoke test: build a small CLM, compare the parallel and recurrent
    # passes, and exercise sampling.
    batch_size = 16
    number_of_layers = 4
    hidden_size = 16
    number_of_heads = 8
    sequence_length = 100
    feed_forward_size = 32
    vocab_size = 10
    sample_length = 20
    number_of_samples = 3
    chunk_size = 4
    softmax = True
    # Random token ids as input.
    X = torch.randint(0, vocab_size, (batch_size, sequence_length))
    model = RetentiveNetworkCLM(
        number_of_layers=number_of_layers,
        hidden_size=hidden_size,
        number_of_heads=number_of_heads,
        feed_forward_size=feed_forward_size,
        vocab_size=vocab_size,
        chunk_size=chunk_size,
        softmax=softmax,
    )
    parallel_out = model(X)
    # Recurrent pass: feed one token at a time, carrying the per-layer,
    # per-head S states forward.
    head_size = model.model.retention_layers[0].head_size
    previous_Ses = [
        [torch.zeros(batch_size, head_size, head_size) for _ in range(number_of_heads)]
        for _ in range(number_of_layers)
    ]
    recurrent_out = []
    for i in range(sequence_length):
        out, s = model.forward_recurrent(X[:, i], previous_Ses, i + 1)
        recurrent_out.append(out)
        previous_Ses = s
    recurrent_out = torch.stack(recurrent_out, dim=1)
    sample = model.sample(X, sample_length, number_of_samples=number_of_samples)
    assert sample.shape == (batch_size, number_of_samples, sample_length)
    # The two compute modes should agree numerically.
    if model.softmax:
        assert (parallel_out - recurrent_out).abs().max() < 1e-4
import torch
import torch.nn as nn
from retentive_network.exceptions import InvalidHiddenSizeException
from retentive_network.layers.feed_forward import FeedForward
from retentive_network.layers.layer_norm import LayerNorm
from retentive_network.layers.multi_scale_retention import MultiScaleRetention
class RetentiveNetwork(nn.Module):
def __init__(
self,
number_of_layers: int,
hidden_size: int,
number_of_heads: int,
feed_forward_size: int,
chunk_size: int,
half_point_precision: bool = False,
use_complex_numbers: bool = False,
):
super(RetentiveNetwork, self).__init__()
self.number_of_layers: int = number_of_layers
self.hidden_size: int = hidden_size
self.feed_forward_size: int = feed_forward_size
self.number_of_heads: int = number_of_heads
self.chunk_size: int = chunk_size
self.half_point_precision: bool = half_point_precision
self.use_complex_numbers: bool = use_complex_numbers
self.torch_dtype: torch.dtype = (
torch.float16 if self.half_point_precision else torch.float32
)
if self.use_complex_numbers:
self.torch_dtype: torch.dtype = (
torch.complex32 if self.half_point_precision else torch.complex64
)
self.retention_layers: nn.ModuleList = nn.ModuleList(
[
MultiScaleRetention(
hidden_size=self.hidden_size,
number_of_heads=self.number_of_heads,
chunk_size=self.chunk_size,
dtype=self.torch_dtype,
)
for _ in range(self.number_of_layers)
]
)
self.feed_forward_layers: nn.ModuleList = nn.ModuleList(
[
FeedForward(self.hidden_size, self.feed_forward_size)
for _ in range(self.number_of_layers)
]
)
self.layer_norm: nn.Module = LayerNorm(self.hidden_size, dtype=self.torch_dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Implements the parallel forward pass as described in
the original paper.
Arguments:
x (torch.Tensor): Torch tensor of shape
[batch_size, sequence_length, hidden_size].
Returns:
torch.Tensor: Torch tensor of shape
[batch_size, sequence_length, hidden_size].
"""
for retention_layer, feed_forward_layer in zip(
self.retention_layers, self.feed_forward_layers
):
x_layer_norm: torch.Tensor = self.layer_norm(x)
retention_out: torch.Tensor = retention_layer(x_layer_norm) + x
retention_out_layer_norm: torch.Tensor = self.layer_norm(retention_out)
x: torch.Tensor = (
feed_forward_layer(retention_out_layer_norm) + retention_out
)
return x
def forward_recurrent(self, x, previous_Ses, n):
"""
Implements the recurrent forward pass as described in
the original paper.
Arguments:
x (torch.Tensor): Torch tensor of shape
[batch_size, sequence_length, hidden_size].
previous_Ses (list): List of floats containing previous S values.
n (int): The current nth iteration.
Returns:
torch.Tensor: A Tensor of shape [batch_size, sequence_length, self.hidden_size]
torch.Tensor: s Tensor value to be used in the next
recurrent retention forward pass.
"""
ses = []
for i in range(self.number_of_layers):
retention_layer: nn.Module = self.retention_layers[i]
feed_forward_layer: nn.Module = self.feed_forward_layers[i]
x_layer_norm: torch = self.layer_norm(x)
retention_out, s = retention_layer.forward_recurrent(
x_layer_norm, previous_Ses[i], n
)
feed_forward_in: torch = retention_out + x
ses.append(s)
feed_forward_in_layer_norm: torch = self.layer_norm(feed_forward_in)
x: torch = feed_forward_layer(feed_forward_in_layer_norm) + feed_forward_in
return x, ses
def forward_chunkwise(self, x: torch.Tensor, state: torch.Tensor = None):
"""
Implements the chunkwise forward pass as described in
the original paper.
Arguments:
x (torch.Tensor): Torch tensor of shape
[batch_size, sequence_length, hidden_size].
state (torch.Tensor): previous state value returned from the previous
forward_chunkwise() call. If None,
a torch.zeros() state is initialized
in it's place
Returns:
torch.Tensor: A Tensor of shape [batch_size, sequence_length, self.hidden_size]
torch.Tensor: state Tensor value to be used in the next
recurrent retention forward pass of shape
[batch_size, sequence_length, kv_dim, kv_dim] where kv_dim
is hidden_size // number_of_heads
"""
batch_size, sequence_length, hidden_size = x.shape
if hidden_size != self.hidden_size:
raise InvalidHiddenSizeException(
hidden_size=hidden_size, model_required_hidden_size=self.hidden_size
)
state = torch.zeros(
(batch_size, hidden_size, hidden_size),
dtype=self.torch_dtype,
)
for i in range(self.number_of_layers):
x_layer_norm: torch.Tensor = self.layer_norm(x)
retention_layer: nn.Module = self.retention_layers[i]
feed_forward_layer: nn.Module = self.feed_forward_layers[i]
chunkwise_out, state = retention_layer.forward_chunkwise(x, state)
feed_forward_in: torch.Tensor = chunkwise_out + x
feed_forward_in_layer_norm: torch = self.layer_norm(feed_forward_in)
x: torch = feed_forward_layer(feed_forward_in_layer_norm) + feed_forward_in
return x, state
if __name__ == "__main__":
    # Demo: run the parallel, recurrent and chunkwise passes on random input.
    batch_size = 8
    sequence_length = 5
    hidden_size = 32
    number_of_heads = 4
    number_of_layers = 4
    feed_forward_size = 20
    chunk_size = 4
    input_ = torch.randn(batch_size, sequence_length, hidden_size)
    model = RetentiveNetwork(
        number_of_layers=number_of_layers,
        hidden_size=hidden_size,
        number_of_heads=number_of_heads,
        feed_forward_size=feed_forward_size,
        chunk_size=chunk_size,
    )
    parallel_out = model(input_)
    # Recurrent pass: one timestep at a time, carrying per-layer S states.
    s_dim = hidden_size // number_of_heads
    previous_Ses = [
        [torch.zeros(batch_size, s_dim, s_dim) for _ in range(number_of_heads)]
        for _ in range(number_of_layers)
    ]
    recurrent_out = []
    for idx in range(sequence_length):
        out, s_ns = model.forward_recurrent(input_[:, idx, :], previous_Ses, idx + 1)
        recurrent_out.append(out)
        previous_Ses = s_ns
    recurrent_out = torch.stack(recurrent_out, dim=1)
    chunkwise_out, chunkwise_state = model.forward_chunkwise(
        x=input_,
        state=None,
    )
import torch
import torch.nn as nn
import torch.nn.functional as F
from retentive_network.exceptions import InvalidBatchSizeException
from retentive_network.layers.projection import Projection
class Retention(nn.Module):
    """
    Single retention head implementing three forms of retention from the
    RetNet paper: parallel, recurrent and chunkwise. Q, K and V come from
    bias-free projections onto ``head_size`` dimensions; ``gamma`` is the
    per-head exponential decay factor.
    """

    def __init__(
        self,
        hidden_size: int,
        head_size: int,
        gamma: float,
        chunk_size: int,
        dtype: torch.dtype = torch.float32,
    ):
        """
        Arguments:
            hidden_size (int): Model embedding dimension.
            head_size (int): Dimension of this retention head.
            gamma (float): Exponential decay factor applied to past states.
            chunk_size (int): Chunk length used by the chunkwise pass.
            dtype (torch.dtype): Compute dtype; inputs are cast to it.
        """
        super(Retention, self).__init__()
        self.hidden_size: int = hidden_size
        self.head_size: int = head_size
        self.gamma: float = gamma
        self.chunk_size: int = chunk_size
        self.dtype: torch.dtype = dtype
        # Bias-free Q/K/V projections working at head_size width.
        self.project_q = Projection(
            hidden_size=self.head_size, bias=False, dtype=self.dtype
        )
        self.project_k = Projection(
            hidden_size=self.head_size, bias=False, dtype=self.dtype
        )
        self.project_v = Projection(
            hidden_size=self.head_size, bias=False, dtype=self.dtype
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        `The Parallel Representation of Retention`

        Arguments:
            x (torch.Tensor): Tensor of shape [batch_size, sequence_length, hidden_size]

        Returns:
            torch.Tensor: Tensor value after applying parallel retention.
        """
        if x.dtype != self.dtype:
            x = x.to(self.dtype)
        batch_size, sequence_length, hidden_size = x.shape[:3]
        # Lower-triangular decay matrix: 1 on the diagonal, gamma ** offset
        # below it, zero above it (causal masking).
        diagonal_matrix: torch.Tensor = self.diagonal_matrix(sequence_length)
        q, k, v = self._project_qkv(x)
        # (Q K^T) elementwise-masked/decayed by D, then applied to V.
        attention_mask: torch.Tensor = torch.matmul(
            q, k.transpose(-1, -2)
        ) * diagonal_matrix.unsqueeze(0)
        x: torch.Tensor = torch.matmul(attention_mask, v)
        return x

    def forward_recurrent(self, x: torch.Tensor, previous_S: torch.Tensor, n: int):
        """
        `The Recurrent Representation of Retention`.
        Shoutout to https://github.com/Jamie-Stirling/RetNet implementation

        Arguments:
            x (torch.Tensor): Tensor of shape [batch_size, hidden_size]
            previous_S (torch.Tensor): Tensor of shape [batch_size,
                hidden_size] that typically comes
                from the `s` value returned from the
                last time this method was called.
            n (int): The current nth iteration.

        Returns:
            torch.Tensor: x Tensor value after applying recurrent retention.
            torch.Tensor: s Tensor value to be used in the next
                recurrent retention forward pass.
        """
        if x.dtype != self.dtype:
            x = x.to(self.dtype)
        # NOTE: `n` is converted to a tensor but not referenced afterwards;
        # the recurrence relies only on gamma and previous_S.
        n: torch.Tensor = torch.tensor(
            n,
            dtype=self.dtype,
            requires_grad=False,
        )
        q, k, v = self._project_qkv(x)
        # S_n = gamma * S_{n-1} + K_n^T V_n
        kv = torch.matmul(k.transpose(-1, -2), v)
        s: torch.Tensor = self.gamma * previous_S + kv
        # Retention(X_n) = Q_n S_n
        x: torch.Tensor = torch.matmul(q.unsqueeze(1), s).squeeze(1)
        return x, s

    def diagonal_matrix(self, sequence_length: int) -> torch.Tensor:
        """
        Calculates a matrix with `1` on the diagonal and `gamma ** offset`
        on each lower sub-diagonal (the upper triangle stays zero), returned
        in this layer's dtype.

        Arguments:
            sequence_length (int): Sequence size.

        Returns:
            torch.Tensor: Diagonal Matrix of shape
                [sequence_length, sequence_length].
        """
        # Start from the identity, then add one sub-diagonal per offset.
        x: torch.Tensor = torch.diag(
            torch.tensor([1.0 for _ in range(sequence_length)], dtype=self.dtype),
            0,
        )
        for row in range(sequence_length - 1, 0, -1):
            eye: torch.Tensor = torch.tensor(
                [self.gamma ** (sequence_length - row) for _ in range(sequence_length)],
                dtype=self.dtype,
            )
            # Build the offset diagonal, crop to size and transpose it into
            # the lower triangle.
            diagonal: torch.Tensor = torch.diag(eye, sequence_length - row)[
                :sequence_length, :sequence_length
            ].T
            x += diagonal
        if x.dtype != self.dtype:
            x = x.to(self.dtype)
        return x

    def forward_chunkwise(self, x: torch.Tensor, state: torch.Tensor = None):
        """
        Implements a forward pass on a chunk `x` with
        hidden state `state` and bias `gamma`.

        Arguments:
            x (torch.Tensor): A Tensor of shape [batch_size, sequence_length, hidden_size].
            state (torch.Tensor): Torch Tensor of shape [batch_size, hidden_size, hidden_size].
                If None, a zero tensor is created.

        Returns:
            torch.Tensor: A Tensor of shape [batch_size, sequence_length, hidden_size]
            torch.Tensor: Updated state tensor for the next chunkwise pass.
        """
        if x.dtype != self.dtype:
            x = x.to(self.dtype)
        batch_size, sequence_length, hidden_size = x.shape
        if state is None:
            state = torch.zeros(
                (batch_size, hidden_size, hidden_size), dtype=self.dtype
            )
        q, k, v = self._project_qkv(x)
        # Intra-chunk ("inner") retention plus the cross-chunk contribution
        # carried in `state`.
        retention = torch.matmul(q, k.transpose(-1, -2))
        retention_inner = torch.matmul(retention, v)
        retention_cross = torch.matmul(q, state)
        out = retention_inner + retention_cross
        # NOTE(review): the per-chunk gamma decay of the carried state is
        # disabled here — confirm against the chunkwise recurrence in the
        # RetNet paper before relying on long chains of chunks.
        # state *= self.gamma
        kv = torch.matmul(k, v.transpose(-1, -2))
        # NOTE(review): kv_dim is derived from sequence_length, while the
        # original docstring described hidden_size // number_of_heads — the
        # repeat/pad below is an admitted approximation (see _pad_kv).
        kv_dim = hidden_size // sequence_length
        kv = kv.repeat([1, kv_dim, kv_dim])
        if kv.shape != state.shape:
            kv = self._pad_kv(kv, state)
        # In-place accumulation: this also mutates a caller-supplied state.
        state += kv
        return out, state

    def _project_qkv(self, x: torch.Tensor) -> tuple:
        """
        Helper method to project Q, K, and V values
        from x.

        Arguments:
            x (torch.Tensor): Torch tensor of shape [
                batch_size, sequence_length,
                hidden_size, chunk_size
            ]

        Returns:
            tuple: (Q, K, V) tensors, each the projection of ``x`` through
                the corresponding bias-free Projection layer.
        """
        q = self.project_q(x)
        k = self.project_k(x)
        v = self.project_v(x)
        return q, k, v

    def _pad_kv(self, kv: torch.Tensor, state: torch.Tensor) -> torch.Tensor:
        """
        Applies zero-padding to tensor `kv` so that `kv` and `state`
        become of the same shape. Microsoft may have found a more
        clever way of doing this in the original codebase, but for
        now this should not affect the model results.

        Arguments:
            kv (torch.Tensor): K * V transposed Dot Product.
            state (torch.Tensor): Current state tensor.

        Returns:
            torch.Tensor: kv zero-padded to shape of state.

        Raises:
            InvalidBatchSizeException: If the batch dimensions of ``kv``
                and ``state`` differ.
        """
        kv_batch, kv_w, kv_h = kv.shape
        state_batch, state_w, state_h = state.shape
        if kv_batch != state_batch:
            raise InvalidBatchSizeException(kv, state)
        else:
            # Pad on the right/bottom only, with zeros.
            w_diff = state_w - kv_w
            h_diff = state_h - kv_h
            kv = F.pad(input=kv, pad=(0, w_diff, 0, h_diff), mode="constant", value=0)
        return kv
if __name__ == "__main__":
    # Smoke test: run all three retention passes on random input.
    batch_size, sequence_length, hidden_size, chunk_size, head_size = (4, 20, 100, 2, 4)
    dtype = torch.float32
    input_: torch.Tensor = torch.randn(
        (batch_size, sequence_length, head_size), dtype=dtype
    )
    layer: nn.Module = Retention(
        hidden_size=hidden_size,
        head_size=head_size,
        gamma=0.9,
        chunk_size=chunk_size,
    )
    parallel_out: torch.Tensor = layer(input_)
    # A scalar previous_S broadcasts against the computed kv state.
    recurrent_out, S = layer.forward_recurrent(input_, 0.1234, 2)
    chunkwise_out, state = layer.forward_chunkwise(x=input_, state=None)
    # Parallel and chunkwise passes must agree at least in shape.
    assert parallel_out.shape == chunkwise_out.shape
# author: rethge
# creation date: 2023/07/20
import torch
from torch import nn
import torch.nn.functional as F
# Depthwise separable conv———————————————————————————————————————————————
class RTG_depthwise_separable_conv(nn.Module):
    """Depthwise-separable 2D convolution.

    Factorizes a standard convolution into a depthwise convolution (one
    filter per input channel, ``groups=input_size``) followed by a 1x1
    pointwise convolution that mixes channels.
    """

    def __init__(self, input_size, output_size, kernel_size,
                 stride, padding, bias: bool = True):
        super().__init__()
        depthwise = nn.Conv2d(
            in_channels=input_size,
            out_channels=input_size,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=input_size,
            bias=bias,
        )
        pointwise = nn.Conv2d(
            in_channels=input_size,
            out_channels=output_size,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        self.dsc = nn.Sequential(depthwise, pointwise)

    def forward(self, x):
        """Apply the depthwise then pointwise convolution to ``x``."""
        return self.dsc(x)
class RTG_res_block(nn.Module):
    """Residual block of two depthwise-separable convolutions with SELU
    activations; input and output shapes are identical."""

    def __init__(self, nin):
        super().__init__()
        self.inp = nin
        self.conv1 = RTG_depthwise_separable_conv(nin, nin, kernel_size=3,
                                                  stride=1, padding=1)
        self.conv2 = RTG_depthwise_separable_conv(nin, nin, kernel_size=3,
                                                  stride=1, padding=1)

    def forward(self, x):
        """Return selu(conv2(selu(conv1(x))) + x)."""
        hidden = F.selu(self.conv1(x))
        return F.selu(self.conv2(hidden) + x)
class RTG_res_block_expand(nn.Module):
    """Residual block that widens channels from ``nin`` to ``nout``.

    The skip path uses a 1x1 convolution so the identity matches the
    expanded channel count of the main path.
    """

    def __init__(self, nin, nout):
        super().__init__()
        self.inp = nin
        self.oup = nout
        self.conv1 = RTG_depthwise_separable_conv(nin, nout, kernel_size=3,
                                                  stride=1, padding=1)
        self.conv2 = RTG_depthwise_separable_conv(nout, nout, kernel_size=3,
                                                  stride=1, padding=1)
        self.identity = nn.Conv2d(nin, nout, kernel_size=1,
                                  stride=1, padding=0)

    def forward(self, x):
        """Return selu(main_path(x) + identity(x))."""
        main_path = self.conv2(F.selu(self.conv1(x)))
        skip = self.identity(x)
        return F.selu(main_path + skip)
# ViT————————————————————————————————————————————————————————————————————
# A PyTorch patch-embedding layer for the Vision Transformer (ViT).
class RTG_PatchEmbedding(nn.Module):
    """Turns a 2D input image into a 1D sequence learnable embedding vector.

    A convolution whose kernel and stride both equal ``patch_size`` cuts the
    image into non-overlapping patches and projects each to
    ``embedding_size`` channels; the spatial grid is then flattened into a
    sequence.

    Args:
        input_channels (int): Number of color channels for the input images. Defaults to 3.
        patch_size (int): Size of patches to convert input image into. Defaults to 16.
        embedding_size (int): Size of embedding to turn image into. Defaults to 768.
    """

    def __init__(self,
                 input_channels: int = 3,
                 patch_size: int = 16,
                 embedding_size: int = 768):
        super().__init__()
        self.patch_size = patch_size
        self.patcher = nn.Conv2d(in_channels=input_channels,
                                 out_channels=embedding_size,
                                 stride=patch_size,
                                 kernel_size=patch_size,
                                 padding=0)
        self.flatten = nn.Flatten(start_dim=2, end_dim=3)

    def forward(self, x):
        """Return patch embeddings of shape [batch, num_patches, embedding_size]."""
        img_size = x.shape[-1]
        assert img_size % self.patch_size == 0, f"Input image size must be divisble by patch size, image shape: {img_size}, patch size: {self.patch_size}"
        patch_grid = self.patcher(x)
        # [B, E, H', W'] -> [B, E, H'*W'] -> [B, H'*W', E]
        return self.flatten(patch_grid).permute(0, 2, 1)
class RTG_MultiheadSelf_attention_block(nn.Module):
    """Pre-norm multi-head self-attention block: LayerNorm followed by
    batch-first multi-head attention with query = key = value."""

    def __init__(self,
                 embedding_dim: int = 768,
                 num_head: int = 12,
                 attention_dropout: float = 0):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(normalized_shape=embedding_dim)
        self.multihead_attention = nn.MultiheadAttention(embed_dim=embedding_dim,
                                                         num_heads=num_head,
                                                         dropout=attention_dropout,
                                                         batch_first=True)

    def forward(self, x):
        """Return self-attention over the layer-normalized input."""
        normed = self.LayerNorm(x)
        attention_out, _ = self.multihead_attention(query=normed,
                                                    key=normed,
                                                    value=normed,
                                                    need_weights=False)
        return attention_out
class RTG_MLPBlock(nn.Module):
    """Pre-norm transformer MLP block:
    LayerNorm -> Linear -> GELU -> Dropout -> Linear -> Dropout."""

    def __init__(self,
                 embedding_dim: int = 768,
                 mlp_size: int = 3072,
                 dropout: float = 0.1):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(normalized_shape=embedding_dim)
        self.mlp = nn.Sequential(
            nn.Linear(embedding_dim, mlp_size),
            nn.GELU(),
            nn.Dropout(p=dropout),
            nn.Linear(mlp_size, embedding_dim),
            nn.Dropout(p=dropout),
        )

    def forward(self, x):
        """Normalize then run the MLP; the embedding dimension is preserved."""
        return self.mlp(self.LayerNorm(x))
class RTG_TransformerEncoderBlock(nn.Module):
    """Transformer encoder layer: a multi-head self-attention block and an
    MLP block, each followed by a residual (skip) addition."""

    def __init__(self,
                 embedding_dim: int = 768,
                 num_head: int = 12,
                 mlp_size: int = 3072,
                 mlp_dropout: float = 0.1,
                 attention_dropout: float = 0):
        super().__init__()
        self.msa_block = RTG_MultiheadSelf_attention_block(
            embedding_dim=embedding_dim,
            num_head=num_head,
            attention_dropout=attention_dropout,
        )
        self.mlp_block = RTG_MLPBlock(
            embedding_dim=embedding_dim,
            mlp_size=mlp_size,
            dropout=mlp_dropout,
        )

    def forward(self, x):
        """Apply attention and MLP sub-blocks, each with a skip connection."""
        attended = self.msa_block(x) + x
        return self.mlp_block(attended) + attended
class RTG_ViT(nn.Module):
    """Vision Transformer (ViT) image classifier.

    The image is split into patches, embedded, prepended with a learnable
    class token, summed with learnable position embeddings, passed through a
    stack of transformer encoder blocks, and classified from the class-token
    position. The embedding dimension is derived as
    ``input_channels * patch_size ** 2``.
    """

    def __init__(self,
                 img_size: int = 224,
                 input_channels: int = 3,
                 patch_size: int = 16,
                 num_transformer_layers: int = 12,
                 num_head: int = 12,
                 mlp_size: int = 3072,
                 embedding_dropout: float = 0.1,  # Dropout for patch and position embeddings
                 mlp_dropout: float = 0.1,  # Dropout for dense/MLP layers
                 attention_dropout: float = 0,  # Dropout for attention projection
                 num_classes: int = 1000):  # Default for ImageNet but can customize this
        super().__init__()
        assert img_size % patch_size == 0, f"Image size must be divisible by patch size, image size: {img_size}, patch size: {patch_size}."
        self.num_patches = (img_size//patch_size)**2
        # Each patch's raw pixel count becomes the embedding dimension.
        self.embedding_dim = input_channels*patch_size**2
        self.class_embedding = nn.Parameter(data=torch.randn(1, 1, self.embedding_dim),
                                            requires_grad=True)  # 1 x 1 x E
        self.position_embedding = nn.Parameter(data=torch.randn(1, self.num_patches+1, self.embedding_dim),
                                               requires_grad=True)  # 1 x (N+1) x E
        self.embedding_dropout = nn.Dropout(p=embedding_dropout)
        self.img_patch_embedding = RTG_PatchEmbedding(input_channels=input_channels,
                                                      patch_size=patch_size,
                                                      embedding_size=self.embedding_dim)
        # Stack of encoder blocks. attention_dropout is now forwarded; it was
        # previously accepted but silently ignored.
        self.transformer_encoder = nn.Sequential(
            *[RTG_TransformerEncoderBlock(embedding_dim=self.embedding_dim,
                                          num_head=num_head,
                                          mlp_size=mlp_size,
                                          mlp_dropout=mlp_dropout,
                                          attention_dropout=attention_dropout)
              for _ in range(num_transformer_layers)])
        self.classifier = nn.Sequential(
            nn.LayerNorm(normalized_shape=self.embedding_dim),
            nn.Linear(in_features=self.embedding_dim,
                      out_features=num_classes)
        )

    def forward(self, x):
        """Classify images.

        Arguments:
            x: Image batch of shape [batch, input_channels, img_size, img_size].

        Returns:
            Logits of shape [batch, num_classes], computed from the class
            token position after the encoder stack.
        """
        # Expand the single learnable class token across the batch.
        class_token = self.class_embedding.expand(x.shape[0], -1, -1)
        # Prepend class token to patch embeddings, add position embeddings.
        x = torch.cat((class_token, self.img_patch_embedding(x)), dim=1) + self.position_embedding
        x = self.transformer_encoder(self.embedding_dropout(x))
        # Classify from the class-token (index 0) representation only.
        x = self.classifier(x[:, 0])
        return x
# Resnet——————————————————————————————————————————————————————————————————————————————————————————————————————
class Block(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, then 1x1 expand (x4), with
    a skip connection.

    ``identity_downsample`` is an optional module that projects the skip
    path when the channel count or spatial size changes.
    """

    def __init__(self,
                 in_c,
                 out_c,
                 identity_downsample=None,  # conv
                 stride=1):
        super().__init__()
        self.expansion = 4
        self.conv1 = nn.Conv2d(in_c, out_c, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(out_c)
        self.conv2 = nn.Conv2d(out_c, out_c, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(out_c)
        self.conv3 = nn.Conv2d(out_c, out_c*self.expansion, kernel_size=1, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(out_c*self.expansion)
        self.relu = nn.ReLU()
        self.identity_downsample = identity_downsample

    def forward(self, x):
        """Run the bottleneck path and add the (possibly projected) skip."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        skip = x if self.identity_downsample is None else self.identity_downsample(x)
        return self.relu(out + skip)
class RTG_Resnet(nn.Module):
    """Generic ResNet built from a bottleneck ``block`` class.

    Args:
        block: Block class, instantiated as
            ``block(in_c, out_c, identity_downsample, stride)`` for the first
            block of a stage and ``block(in_c, out_c)`` for the rest.
        layers: Number of residual blocks in each of the four stages.
        img_channels: Channel count of the input images.
        num_classes: Output size of the final classification layer.
    """

    def __init__(self, block, layers, img_channels, num_classes):
        super().__init__()
        self.in_c = 64
        self.conv1 = nn.Conv2d(img_channels, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; spatial size halves from stage 2 onward.
        self.layer1 = self._make_layer(block, layers[0], out_channels=64, stride=1)
        self.layer2 = self._make_layer(block, layers[1], out_channels=128, stride=2)
        self.layer3 = self._make_layer(block, layers[2], out_channels=256, stride=2)
        self.layer4 = self._make_layer(block, layers[3], out_channels=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # GAP
        self.fc = nn.Linear(512*4, num_classes)

    def forward(self, x):
        """Stem -> four residual stages -> GAP -> flatten -> logits."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)

    def _make_layer(self, block, num_res_blocks, out_channels, stride):
        """Build one residual stage; a 1x1 conv + BN projects the skip path
        of the first block whenever channels or stride change."""
        identity_downsample = None
        if stride != 1 or self.in_c != out_channels*4:
            identity_downsample = nn.Sequential(
                nn.Conv2d(self.in_c, out_channels*4, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels*4),
            )
        layers = [block(self.in_c, out_channels, identity_downsample, stride)]
        self.in_c = out_channels*4
        layers.extend(block(self.in_c, out_channels) for _ in range(num_res_blocks - 1))
        return nn.Sequential(*layers)
def RTG_Resnet50(img_c=3, num_class=3):
    """Build a ResNet-50 (stage depths 3, 4, 6, 3) over ``img_c``-channel
    images with ``num_class`` output logits."""
    stage_depths = [3, 4, 6, 3]
    return RTG_Resnet(Block, stage_depths, img_c, num_class)
import rethinkdb
docsSource = [
(
rethinkdb.net.Connection.close,
b"conn.close(noreply_wait=True)\n\nClose an open connection.\n\nClosing a connection normally waits until all outstanding requests have finished and then frees any open resources associated with the connection. By passing `False` to the `noreply_wait` optional argument, the connection will be closed immediately, possibly aborting any outstanding noreply writes.\n\nA noreply query is executed by passing the `noreply` option to the [run](http://rethinkdb.com/api/python/run/) command, indicating that `run()` should not wait for the query to complete before returning. You may also explicitly wait for a noreply query to complete by using the [noreply_wait](http://rethinkdb.com/api/python/noreply_wait) command.\n\n*Example* Close an open connection, waiting for noreply writes to finish.\n\n conn.close()\n\n*Example* Close an open connection immediately.\n\n conn.close(noreply_wait=False)\n",
),
(
rethinkdb.connect,
b"r.connect(host=\"localhost\", port=28015, db=\"test\", auth_key=\"\", timeout=20) -> connection\nr.connect(host) -> connection\n\nCreate a new connection to the database server. The keyword arguments are:\n\n- `host`: host of the RethinkDB instance. The default value is `localhost`.\n- `port`: the driver port, by default `28015`.\n- `db`: the database used if not explicitly specified in a query, by default `test`.\n- `user`: the user account to connect as (default `admin`).\n- `password`: the password for the user account to connect as (default `''`, empty).\n- `timeout`: timeout period in seconds for the connection to be opened (default `20`).\n- `ssl`: a hash of options to support SSL connections (default `None`). Currently, there is only one option available, and if the `ssl` option is specified, this key is required:\n - `ca_certs`: a path to the SSL CA certificate.\n\nIf the connection cannot be established, a `ReqlDriverError` exception will be thrown.\n\n<!-- break -->\n\nThe RethinkDB Python driver includes support for asynchronous connections using Tornado and Twisted. Read the asynchronous connections documentation for more information.\n\n*Example* Open a connection using the default host and port, specifying the default database.\n\n conn = r.connect(db='marvel')\n\n*Example* Open a new connection to the database.\n\n conn = r.connect(host='localhost',\n port=28015,\n db='heroes')\n\n*Example* Open a new connection to the database, specifying a user/password combination for authentication.\n\n conn = r.connect(host='localhost',\n port=28015,\n db='heroes',\n user='herofinder',\n password='metropolis')\n\n*Example* Open a new connection to the database using an SSL proxy.\n\n conn = r.connect(host='localhost',\n port=28015,\n auth_key='hunter2',\n ssl={'ca_certs': '/path/to/ca.crt'})\n\n*Example* Use a `with` statement to open a connection and pass it to a block. 
Using this style, the connection will be automatically closed when execution reaches the end of the block.\n\n with r.connect(db='marvel') as conn:\n r.table('superheroes').run(conn)\n",
),
(
rethinkdb.net.Connection.noreply_wait,
b"conn.noreply_wait()\n\n`noreply_wait` ensures that previous queries with the `noreply` flag have been processed\nby the server. Note that this guarantee only applies to queries run on the given connection.\n\n*Example* We have previously run queries with the `noreply` argument set to `True`. Now\nwait until the server has processed them.\n\n conn.noreply_wait()\n\n",
),
(
rethinkdb,
b"r -> r\n\nThe top-level ReQL namespace.\n\n*Example* Setup your top-level namespace.\n\n import rethinkdb as r\n\n",
),
(
rethinkdb.net.Connection.reconnect,
b"conn.reconnect(noreply_wait=True)\n\nClose and reopen a connection.\n\nClosing a connection normally waits until all outstanding requests have finished and then frees any open resources associated with the connection. By passing `False` to the `noreply_wait` optional argument, the connection will be closed immediately, possibly aborting any outstanding noreply writes.\n\nA noreply query is executed by passing the `noreply` option to the [run](http://rethinkdb.com/api/python/run/) command, indicating that `run()` should not wait for the query to complete before returning. You may also explicitly wait for a noreply query to complete by using the [noreply_wait](http://rethinkdb.com/api/python/noreply_wait) command.\n\n*Example* Cancel outstanding requests/queries that are no longer needed.\n\n conn.reconnect(noreply_wait=False)\n",
),
(
rethinkdb.net.Connection.repl,
b"conn.repl()\n\nSet the default connection to make REPL use easier. Allows calling\n`.run()` on queries without specifying a connection.\n\n__Note:__ Avoid using `repl` in application code. RethinkDB connection objects are not thread-safe, and calls to `connect` from multiple threads may change the global connection object used by `repl`. Applications should specify connections explicitly.\n\n*Example* Set the default connection for the REPL, then call\n`run()` without specifying the connection.\n\n r.connect(db='marvel').repl()\n r.table('heroes').run()\n",
),
(
rethinkdb.ast.RqlQuery.run,
b"query.run(conn[, options]) -> cursor\nquery.run(conn[, options]) -> object\n\nRun a query on a connection, returning either a single JSON result or\na cursor, depending on the query.\n\nThe optional arguments are:\n\n- `read_mode`: One of three possible values affecting the consistency guarantee for the query (default: `'single'`).\n - `'single'` (the default) returns values that are in memory (but not necessarily written to disk) on the primary replica.\n - `'majority'` will only return values that are safely committed on disk on a majority of replicas. This requires sending a message to every replica on each read, so it is the slowest but most consistent.\n - `'outdated'` will return values that are in memory on an arbitrarily-selected replica. This is the fastest but least consistent.\n- `time_format`: what format to return times in (default: `'native'`).\n Set this to `'raw'` if you want times returned as JSON objects for exporting.\n- `profile`: whether or not to return a profile of the query's\n execution (default: `False`).\n- `durability`: possible values are `'hard'` and `'soft'`. In soft durability mode RethinkDB\nwill acknowledge the write immediately after receiving it, but before the write has\nbeen committed to disk.\n- `group_format`: what format to return `grouped_data` and `grouped_streams` in (default: `'native'`).\n Set this to `'raw'` if you want the raw pseudotype.\n- `noreply`: set to `True` to not receive the result object or cursor and return immediately.\n- `db`: the database to run this query against as a string. The default is the database specified in the `db` parameter to [connect](http://rethinkdb.com/api/python/connect/) (which defaults to `test`). The database may also be specified with the [db](http://rethinkdb.com/api/python/db/) command.\n- `array_limit`: the maximum numbers of array elements that can be returned by a query (default: 100,000). This affects all ReQL commands that return arrays. 
Note that it has no effect on the size of arrays being _written_ to the database; those always have an upper limit of 100,000 elements.\n- `binary_format`: what format to return binary data in (default: `'native'`). Set this to `'raw'` if you want the raw pseudotype.\n- `min_batch_rows`: minimum number of rows to wait for before batching a result set (default: 8). This is an integer.\n- `max_batch_rows`: maximum number of rows to wait for before batching a result set (default: unlimited). This is an integer.\n- `max_batch_bytes`: maximum number of bytes to wait for before batching a result set (default: 1MB). This is an integer.\n- `max_batch_seconds`: maximum number of seconds to wait before batching a result set (default: 0.5). This is a float (not an integer) and may be specified to the microsecond.\n- `first_batch_scaledown_factor`: factor to scale the other parameters down by on the first batch (default: 4). For example, with this set to 8 and `max_batch_rows` set to 80, on the first batch `max_batch_rows` will be adjusted to 10 (80 / 8). This allows the first batch to return faster.\n\n*Example* Run a query on the connection `conn` and print out every\nrow in the result.\n\n for doc in r.table('marvel').run(conn):\n print doc\n\n*Example* If you are OK with potentially out of date data from all\nthe tables involved in this query and want potentially faster reads,\npass a flag allowing out of date data in an options object. Settings\nfor individual tables will supercede this global setting for all\ntables in the query.\n\n r.table('marvel').run(conn, read_mode='outdated')\n\n*Example* If you just want to send a write and forget about it, you\ncan set `noreply` to true in the options. 
In this case `run` will\nreturn immediately.\n\n r.table('marvel').run(conn, noreply=True)\n\n*Example* If you want to specify whether to wait for a write to be\nwritten to disk (overriding the table's default settings), you can set\n`durability` to `'hard'` or `'soft'` in the options.\n\n r.table('marvel')\n .insert({ 'superhero': 'Iron Man', 'superpower': 'Arc Reactor' })\n .run(conn, noreply=True, durability='soft')\n\n*Example* If you do not want a time object to be converted to a\nnative date object, you can pass a `time_format` flag to prevent it\n(valid flags are \"raw\" and \"native\"). This query returns an object\nwith two fields (`epoch_time` and `$reql_type$`) instead of a native date\nobject.\n\n r.now().run(conn, time_format=\"raw\")\n\n*Example* Specify the database to use for the query.\n\n for doc in r.table('marvel').run(conn, db='heroes'):\n print doc\n\nThis is equivalent to using the `db` command to specify the database:\n\n r.db('heroes').table('marvel').run(conn) ...\n\n*Example* Change the batching parameters for this query.\n\n r.table('marvel').run(conn, max_batch_rows=16, max_batch_bytes=2048)\n",
),
(
rethinkdb.net.Connection.server,
b'conn.server()\n\nReturn information about the server being used by a connection.\n\nThe `server` command returns either two or three fields:\n\n* `id`: the UUID of the server the client is connected to.\n* `proxy`: a boolean indicating whether the server is a RethinkDB proxy node.\n* `name`: the server name. If `proxy` is `True`, this field will not be returned.\n\n*Example* Return server information.\n\n > conn.server()\n \n {\n "id": "404bef53-4b2c-433f-9184-bc3f7bda4a15",\n "name": "amadeus",\n "proxy": False\n }\n',
),
(
rethinkdb.set_loop_type,
b'r.set_loop_type(string)\n\nSet an asynchronous event loop model. There are two supported models:\n\n* `"tornado"`: use the Tornado web framework. Under this model, the connect and run commands will return Tornado `Future` objects.\n* `"twisted"`: use the Twisted networking engine. Under this model, the connect and run commands will return Twisted `Deferred` objects.\n\n*Example* Read a table\'s data using Tornado.\n\n r.set_loop_type("tornado")\n conn = r.connect(host=\'localhost\', port=28015)\n \n @gen.coroutine\n def use_cursor(conn):\n # Print every row in the table.\n cursor = yield r.table(\'test\').order_by(index="id").run(yield conn)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n print(item)\n\nFor a longer discussion with both Tornado and Twisted examples, see the documentation article on Asynchronous connections.\n\n',
),
(
rethinkdb.net.Connection.use,
b"conn.use(db_name)\n\nChange the default database on this connection.\n\n*Example* Change the default database so that we don't need to\nspecify the database when referencing a table.\n\n conn.use('marvel')\n r.table('heroes').run(conn) # refers to r.db('marvel').table('heroes')\n",
),
(
rethinkdb.ast.Table.config,
b'table.config() -> selection<object>\ndatabase.config() -> selection<object>\n\nQuery (read and/or update) the configurations for individual tables or databases.\n\nThe `config` command is a shorthand way to access the `table_config` or `db_config` [System tables](http://rethinkdb.com/docs/system-tables/#configuration-tables). It will return the single row from the system that corresponds to the database or table configuration, as if [get](http://rethinkdb.com/api/python/get) had been called on the system table with the UUID of the database or table in question.\n\n*Example* Get the configuration for the `users` table.\n\n r.table(\'users\').config().run(conn)\n\n<!-- stop -->\n\nExample return:\n\n \n {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "users",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "a",\n "replicas": ["a", "b"],\n "nonvoting_replicas": []\n },\n {\n "primary_replica": "d",\n "replicas": ["c", "d"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n }\n\n*Example* Change the write acknowledgement requirement of the `users` table.\n\n r.table(\'users\').config().update({\'write_acks\': \'single\'}).run(conn)\n',
),
(
rethinkdb.grant,
b"r.grant(\"username\", {\"permission\": bool[, ...]}) -> object\ndb.grant(\"username\", {\"permission\": bool[, ...]}) -> object\ntable.grant(\"username\", {\"permission\": bool[, ...]}) -> object\n\nGrant or deny access permissions for a user account, globally or on a per-database or per-table basis.\n\nThere are four different permissions that can be granted to an account:\n\n* `read` allows reading the data in tables.\n* `write` allows modifying data, including inserting, replacing/updating, and deleting.\n* `connect` allows a user to open HTTP connections via the http command. This permission can only be granted in global scope.\n* `config` allows users to create/drop secondary indexes on a table and changing the cluster configuration; to create and drop tables, if granted on a database; and to create and drop databases, if granted globally.\n\nPermissions may be granted on a global scope, or granted for a specific table or database. The scope is defined by calling `grant` on its own (e.g., `r.grant()`, on a table (`r.table().grant()`), or on a database (`r.db().grant()`).\n\nThe `grant` command returns an object of the following form:\n\n {\n \"granted\": 1,\n \"permissions_changes\": [\n {\n \"new_val\": { new permissions },\n \"old_val\": { original permissions }\n }\n ]\n\nThe `granted` field will always be `1`, and the `permissions_changes` list will have one object, describing the new permissions values and the old values they were changed from (which may be `None`).\n\nPermissions that are not defined on a local scope will be inherited from the next largest scope. 
For example, a write operation on a table will first check if `write` permissions are explicitly set to `True` or `False` for that table and account combination; if they are not, the `write` permissions for the database will be used if those are explicitly set; and if neither table nor database permissions are set for that account, the global `write` permissions for that account will be used.\n\n__Note:__ For all accounts other than the special, system-defined `admin` account, permissions that are not explicitly set in any scope will effectively be `False`. When you create a new user account by inserting a record into the system table, that account will have _no_ permissions until they are explicitly granted.\n\nFor a full description of permissions, read Permissions and user accounts.\n\n*Example* Grant the `chatapp` user account read and write permissions on the `users` database.\n\n > r.db('users').grant('chatapp', {'read': True, 'write': True}).run(conn)\n \n {\n \"granted\": 1,\n \"permissions_changes\": [\n {\n \"new_val\": { \"read\": true, \"write\": true },\n \"old_val\": { null }\n }\n ]\n\n*Example* Deny write permissions from the `chatapp` account for the `admin` table.\n\n r.db('users').table('admin').grant('chatapp', {'write': False}).run(conn)\n\nThis will override the `write: true` permissions granted in the first example, but for this table only. 
Other tables in the `users` database will inherit from the database permissions.\n\n*Example* Delete a table-level permission for the `chatapp` account.\n\n r.db('users').table('admin').grant('chatapp', {'write': None}).run(conn)\n\nBy specifying `None`, the table scope `write` permission is removed, and will again inherit from the next highest scope (database or global).\n\n*Example* Grant `chatapp` the ability to use HTTP connections.\n\n r.grant('chatapp', {'connect': True}).run(conn)\n\nThis grant can only be given on a global level.\n\n*Example* Grant a `monitor` account read-only access to all databases.\n\n r.grant('monitor', {'read': True}).run(conn)\n",
),
(
rethinkdb.ast.Table.rebalance,
b'table.rebalance() -> object\ndatabase.rebalance() -> object\n\nRebalances the shards of a table. When called on a database, all the tables in that database will be rebalanced.\n\nThe `rebalance` command operates by measuring the distribution of primary keys within a table and picking split points that will give each shard approximately the same number of documents. It won\'t change the number of shards within a table, or change any other configuration aspect for the table or the database.\n\nA table will lose availability temporarily after `rebalance` is called; use the [wait](http://rethinkdb.com/api/python/wait) command to wait for the table to become available again, or [status](http://rethinkdb.com/api/python/status) to check if the table is available for writing.\n\nRethinkDB automatically rebalances tables when the number of shards are increased, and as long as your documents have evenly distributed primary keys—such as the default UUIDs—it is rarely necessary to call `rebalance` manually. Cases where `rebalance` may need to be called include:\n\n* Tables with unevenly distributed primary keys, such as incrementing integers\n* Changing a table\'s primary key type\n* Increasing the number of shards on an empty table, then using non-UUID primary keys in that table\n\nThe [web UI](http://rethinkdb.com/docs/administration-tools/) (and the [info](http://rethinkdb.com/api/python/info) command) can be used to tell you when a table\'s shards need to be rebalanced.\n\nThe return value of `rebalance` is an object with two fields:\n\n* `rebalanced`: the number of tables rebalanced.\n* `status_changes`: a list of new and old table status values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `rebalance` was executed. \n * `new_val`: The table\'s `status` value after `rebalance` was executed. 
(This value will almost always indicate the table is unavailable.)\n\nSee the [status](http://rethinkdb.com/api/python/status) command for an explanation of the objects returned in the `old_val` and `new_val` fields.\n\n*Example* Rebalance a table.\n\n r.table(\'superheroes\').rebalance().run(conn)\n\n<!-- stop -->\n\nExample return:\n\n {\n "rebalanced": 1,\n "status_changes": [\n {\n "old_val": {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": True,\n "ready_for_outdated_reads": True,\n "ready_for_reads": True,\n "ready_for_writes": True\n }\n },\n "new_val": {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "transitioning"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "transitioning"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": False,\n "ready_for_outdated_reads": False,\n "ready_for_reads": False,\n "ready_for_writes": False\n }\n }\n \n }\n ]\n }\n',
),
(
rethinkdb.ast.Table.reconfigure,
b'table.reconfigure(shards=<s>, replicas=<r>[, primary_replica_tag=<t>, dry_run=False, nonvoting_replica_tags=None]) -> object\ndatabase.reconfigure(shards=<s>, replicas=<r>[, primary_replica_tag=<t>, dry_run=False, nonvoting_replica_tags=None]) -> object\ntable.reconfigure(emergency_repair=<option>, dry_run=False) -> object\n\nReconfigure a table\'s sharding and replication.\n\n* `shards`: the number of shards, an integer from 1-64. Required.\n* `replicas`: either an integer or a mapping object. Required.\n * If `replicas` is an integer, it specifies the number of replicas per shard. Specifying more replicas than there are servers will return an error.\n * If `replicas` is an object, it specifies key-value pairs of server tags and the number of replicas to assign to those servers: `{"tag1": 2, "tag2": 4, "tag3": 2, ...}`. For more information about server tags, read [Administration tools](http://rethinkdb.com/docs/administration-tools/).\n* `primary_replica_tag`: the primary server specified by its server tag. Required if `replicas` is an object; the tag must be in the object. This must *not* be specified if `replicas` is an integer.\n* `dry_run`: if `True` the generated configuration will not be applied to the table, only returned.\n* `nonvoting_replica_tags`: replicas with these server tags will be added to the `nonvoting_replicas` list of the resulting configuration. (See [failover](http://rethinkdb.com/docs/failover) for details about non-voting replicas.)\n* `emergency_repair`: Used for the Emergency Repair mode. See the separate section below.\n\nThe return value of `reconfigure` is an object with three fields:\n\n* `reconfigured`: the number of tables reconfigured. This will be `0` if `dry_run` is `True`.\n* `config_changes`: a list of new and old table configuration values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [config](http://rethinkdb.com/api/python/config) value before `reconfigure` was executed. 
\n * `new_val`: The table\'s `config` value after `reconfigure` was executed.\n* `status_changes`: a list of new and old table status values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `reconfigure` was executed. \n * `new_val`: The table\'s `status` value after `reconfigure` was executed.\n\nFor `config_changes` and `status_changes`, see the [config](http://rethinkdb.com/api/python/config) and [status](http://rethinkdb.com/api/python/status) commands for an explanation of the objects returned in the `old_val` and `new_val` fields.\n\nA table will lose availability temporarily after `reconfigure` is called; use the [wait](http://rethinkdb.com/api/python/wait) command to wait for the table to become available again, or [status](http://rethinkdb.com/api/python/status) to check if the table is available for writing.\n\n**Note:** Whenever you call `reconfigure`, the write durability will be set to `hard` and the write acknowledgments will be set to `majority`; these can be changed by using the `config` command on the table.\n\nIf `reconfigure` is called on a database, all the tables in the database will have their configurations affected. 
The return value will be an array of the objects described above, one per table.\n\nRead [Sharding and replication](http://rethinkdb.com/docs/sharding-and-replication/) for a complete discussion of the subject, including advanced topics.\n\n*Example* Reconfigure a table.\n\n r.table(\'superheroes\').reconfigure(shards=2, replicas=1).run(conn)\n\n<!-- stop -->\n\nExample return:\n\n {\n "reconfigured": 1,\n "config_changes": [\n {\n "new_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n },\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n },\n "old_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n }\n }\n ],\n "status_changes": [\n {\n "new_val": (status object),\n "old_val": (status object)\n }\n ]\n }\n\n*Example* Reconfigure a table, specifying replicas by server tags.\n\n r.table(\'superheroes\').reconfigure(shards=2, replicas={\'wooster\': 1, \'wayne\': 1}, primary_replica_tag=\'wooster\').run(conn)\n \n {\n "reconfigured": 1,\n "config_changes": [\n {\n "new_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n },\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n },\n "old_val": 
{\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n }\n }\n ],\n "status_changes": [\n {\n "new_val": (status object),\n "old_val": (status object)\n }\n ]\n }\n\nRethinkDB supports automatic failover when more than half of the voting replicas for each shard of a table are still available (see the Failover documentation for more details). However, if half or more of the voting replicas for a shard are lost, failover will not happen automatically, leaving two options:\n\n* Bring enough of the missing servers back online to allow automatic failover\n* Use emergency repair mode to reconfigure the table\n\nThe `emergency_repair` argument is effectively a different command; when it is specified, no other arguments to `reconfigure` are allowed except for `dry_run`. When it\'s executed, each shard of the table is examined and classified into one of three categories:\n\n* **Healthy:** more than half of the shard\'s voting replicas are still available.\n* **Repairable:** the shard is not healthy, but has at least one replica, whether voting or non-voting, available.\n* **Beyond repair:** the shard has no replicas available.\n\nFor each repairable shard, `emergency_repair` will convert all unavailable voting replicas into non-voting replicas. If all the voting replicas were removed, an arbitrarily-chosen available non-voting replica will be converted into a voting replica. 
After this operation, all of the shard\'s available replicas will be voting replicas.\n\nSpecify `emergency_repair` with one of two string options:\n\n* `unsafe_rollback`: shards that are beyond repair will be left alone.\n* `unsafe_rollback_or_erase`: a shard that is beyond repair will be destroyed and recreated on an available server that holds another shard for that table.\n\nThe return value of `reconfigure` in emergency repair mode is the same as before. Examine the `config_changes` field to see the old and new configuration settings for the table. As in the normal mode, if you specify `emergency_repair` with `dry_run: True`, the table will not actually be reconfigured.\n\n__Note:__ `emergency_repair` may only be used on individual tables, not on databases. It cannot be used after the `db` command.\n\n*Example* Perform an emergency repair on a table.\n\n r.table(\'superheroes\').reconfigure(emergency_repair=\'unsafe_rollback\').run(conn)\n',
),
(
rethinkdb.ast.Table.status,
b'table.status() -> selection<object>\n\nReturn the status of a table.\n\nThe return value is an object providing information about the table\'s shards, replicas and replica readiness states. For a more complete discussion of the object fields, read about the `table_status` table in [System tables](http://rethinkdb.com/docs/system-tables/#status-tables).\n\n* `id`: the UUID of the table.\n* `name`: the table\'s name.\n* `db`: the database the table is in.\n* `status`: the subfields in this field indicate whether all shards of the table are ready to accept the given type of query: `outdated_reads`, `reads` and `writes`. The `all_replicas_ready` field indicates whether all backfills have finished.\n* `shards`: one entry for each shard in `table_config`. Each shard\'s object has the following fields:\n\t* `primary_replicas`: a list of zero or more servers acting as primary replicas for the table.\n\t* `replicas`: a list of all servers acting as a replica for that shard. The `state` field may be one of the following: `ready`, `transitioning`, `backfilling`, `disconnected`, `waiting_for_primary`, or `waiting_for_quorum`.\n\n*Example* Get a table\'s status.\n\n r.table(\'superheroes\').status().run(conn)\n\n<!-- stop -->\n\nExample return:\n\n {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replicas": ["jeeves"],\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n },\n {\n "primary_replicas": ["jeeves"],\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": True,\n "ready_for_outdated_reads": True,\n "ready_for_reads": True,\n "ready_for_writes": True\n }\n }\n',
),
(
rethinkdb.ast.Table.wait,
b"table.wait([wait_for='ready_for_writes', timeout=<sec>]) -> object\ndatabase.wait([wait_for='ready_for_writes', timeout=<sec>]) -> object\nr.wait(table | database, [wait_for='ready_for_writes', timeout=<sec>]) -> object\n\nWait for a table or all the tables in a database to be ready. A table may be temporarily unavailable after creation, rebalancing or reconfiguring. The `wait` command blocks until the given table (or database) is fully up to date.\n\nThe `wait` command takes two optional arguments:\n\n* `wait_for`: a string indicating a table [status](http://rethinkdb.com/api/python/status) to wait on before returning, one of `ready_for_outdated_reads`, `ready_for_reads`, `ready_for_writes`, or `all_replicas_ready`. The default is `ready_for_writes`. \n* `timeout`: a number indicating maximum time, in seconds, to wait for the table to be ready. If this value is exceeded, a `ReqlRuntimeError` will be thrown. A value of`0` means no timeout. The default is `0` (no timeout).\n\nThe return value is an object consisting of a single field, `ready`. The value is an integer indicating the number of tables waited for. It will always be `1` when `wait` is called on a table, and the total number of tables when called on a database.\n\n*Example* Wait on a table to be ready.\n\n r.table('superheroes').wait().run(conn)\n \n {\"ready\": 1}\n",
),
(
rethinkdb.ast.RqlQuery.avg,
b"sequence.avg([field | function]) -> number\n\nAverages all the elements of a sequence. If called with a field name,\naverages all the values of that field in the sequence, skipping\nelements of the sequence that lack that field. If called with a\nfunction, calls that function on every element of the sequence and\naverages the results, skipping elements of the sequence where that\nfunction returns `None` or a non-existence error.\n\nProduces a non-existence error when called on an empty sequence. You\ncan handle this case with `default`.\n\n*Example* What's the average of 3, 5, and 7?\n\n r.expr([3, 5, 7]).avg().run(conn)\n\n*Example* What's the average number of points scored in a game?\n\n r.table('games').avg('points').run(conn)\n\n*Example* What's the average number of points scored in a game,\ncounting bonus points?\n\n r.table('games').avg(lambda game:\n game['points'] + game['bonus_points']\n ).run(conn)\n\n*Example* What's the average number of points scored in a game?\n(But return `None` instead of raising an error if there are no games where\npoints have been scored.)\n\n r.table('games').avg('points').default(None).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.contains,
b"sequence.contains([value | predicate_function, ...]) -> bool\n\nWhen called with values, returns `True` if a sequence contains all the\nspecified values. When called with predicate functions, returns `True`\nif for each predicate there exists at least one element of the stream\nwhere that predicate returns `True`.\n\nValues and predicates may be mixed freely in the argument list.\n\n*Example* Has Iron Man ever fought Superman?\n\n r.table('marvel').get('ironman')['opponents'].contains('superman').run(conn)\n\n*Example* Has Iron Man ever defeated Superman in battle?\n\n r.table('marvel').get('ironman')['battles'].contains(lambda battle:\n (battle['winner'] == 'ironman') & (battle['loser'] == 'superman')\n ).run(conn)\n\n*Example* Use `contains` with a predicate function to simulate an `or`. Return the Marvel superheroes who live in Detroit, Chicago or Hoboken.\n\n r.table('marvel').filter(\n lambda hero: r.expr(['Detroit', 'Chicago', 'Hoboken']).contains(hero['city'])\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.count,
b"sequence.count([value | predicate_function]) -> number\nbinary.count() -> number\nstring.count() -> number\nobject.count() -> number\n\nCounts the number of elements in a sequence or key/value pairs in an object, or returns the size of a string or binary object.\n\nWhen `count` is called on a sequence with a predicate value or function, it returns the number of elements in the sequence equal to that value or where the function returns `True`. On a [binary](http://rethinkdb.com/api/python/binary) object, `count` returns the size of the object in bytes; on strings, `count` returns the string's length. This is determined by counting the number of Unicode codepoints in the string, counting combining codepoints separately.\n\n*Example* Count the number of users.\n\n r.table('users').count().run(conn)\n\n*Example* Count the number of 18 year old users.\n\n r.table('users')['age'].count(18).run(conn)\n\n*Example* Count the number of users over 18.\n\n r.table('users')['age'].count(lambda age: age > 18).run(conn)\n\n r.table('users').count(lambda user: user['age'] > 18).run(conn)\n\n*Example* Return the length of a Unicode string.\n\n > r.expr(u'\xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf').count().run(conn)\n 5\n",
),
(
rethinkdb.ast.RqlQuery.distinct,
b"sequence.distinct() -> array\ntable.distinct([index=<indexname>]) -> stream\n\nRemoves duplicate elements from a sequence.\n\nThe `distinct` command can be called on any sequence or table with an index.\n\n*Example* Which unique villains have been vanquished by Marvel heroes?\n\n r.table('marvel').concat_map(\n lambda hero: hero['villain_list']).distinct().run(conn)\n\n*Example* Topics in a table of messages have a secondary index on them, and more than one message can have the same topic. What are the unique topics in the table?\n\n r.table('messages').distinct(index='topics').run(conn)\n\nThe above structure is functionally identical to:\n\n r.table('messages')['topics'].distinct().run(conn)\n\nHowever, the first form (passing the index as an argument to `distinct`) is faster, and won't run into array limit issues since it's returning a stream.\n",
),
(
rethinkdb.ast.RqlQuery.fold,
b"sequence.fold(base, function) -> value\nsequence.fold(base, function, emit=function[, final_emit=function]) -> sequence\n\nApply a function to a sequence in order, maintaining state via an accumulator. The `fold` command returns either a single value or a new sequence.\n\nIn its first form, `fold` operates like reduce, returning a value by applying a combining function to each element in a sequence, passing the current element and the previous reduction result to the function. However, `fold` has the following differences from `reduce`:\n\n* it is guaranteed to proceed through the sequence from first element to last.\n* it passes an initial base value to the function with the first element in place of the previous reduction result.\n\nIn its second form, `fold` operates like concat_map, returning a new sequence rather than a single value. When an `emit` function is provided, `fold` will:\n\n* proceed through the sequence in order and take an initial base value, as above.\n* for each element in the sequence, call both the combining function and a separate emitting function with the current element and previous reduction result.\n* optionally pass the result of the combining function to the emitting function.\n\nIf provided, the emitting function must return a list.\n\n*Example* Concatenate words from a list.\n\n r.table('words').order_by('id').fold('',\n lambda acc, word: acc + r.branch(acc == '', '', ', ') + word\n ).run(conn)\n\n(This example could be implemented with `reduce`, but `fold` will preserve the order when `words` is a RethinkDB table or other stream, which is not guaranteed with `reduce`.)\n\n*Example* Return every other row in a table.\n\n r.table('even_things').fold(0,\n lambda acc, row: acc + 1,\n emit=lambda acc, row: r.branch((acc % 2 == 0), [row], [])\n ).run(conn)\n\nThe first function increments the accumulator each time it's called, starting at `0`; the second function, the emitting function, alternates between returning a single-item list 
containing the current row or an empty list. The `fold` command will return a concatenated list of each emitted value.\n\n*Example* Compute a five-day running average for a weight tracker.\n\n r.table('tracker').filter({'name': 'bob'}).order_by('date')['weight'].fold(\n [],\n lambda acc, row: ([row] + acc).limit(5),\n emit=lambda acc, row, new_acc: r.branch(new_acc.size() == 5, [new_acc.avg()], [])\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.group,
b"sequence.group(field | function..., [index=<indexname>, multi=False]) -> grouped_stream\n\nTakes a stream and partitions it into multiple groups based on the\nfields or functions provided.\n\nWith the `multi` flag single documents can be assigned to multiple groups, similar to the behavior of [multi-indexes](http://rethinkdb.com/docs/secondary-indexes/python). When `multi` is `True` and the grouping value is an array, documents will be placed in each group that corresponds to the elements of the array. If the array is empty the row will be ignored.\n\nSuppose that the table `games` has the following data:\n\n [\n {\"id\": 2, \"player\": \"Bob\", \"points\": 15, \"type\": \"ranked\"},\n {\"id\": 5, \"player\": \"Alice\", \"points\": 7, \"type\": \"free\"},\n {\"id\": 11, \"player\": \"Bob\", \"points\": 10, \"type\": \"free\"},\n {\"id\": 12, \"player\": \"Alice\", \"points\": 2, \"type\": \"free\"}\n ]\n\n*Example* Group games by player.\n\n > r.table('games').group('player').run(conn)\n \n {\n \"Alice\": [\n {\"id\": 5, \"player\": \"Alice\", \"points\": 7, \"type\": \"free\"},\n {\"id\": 12, \"player\": \"Alice\", \"points\": 2, \"type\": \"free\"}\n ],\n \"Bob\": [\n {\"id\": 2, \"player\": \"Bob\", \"points\": 15, \"type\": \"ranked\"},\n {\"id\": 11, \"player\": \"Bob\", \"points\": 10, \"type\": \"free\"}\n ]\n }\n\n<!-- stop -->\n\nCommands chained after `group` will be called on each of these grouped\nsub-streams, producing grouped data.\n\n*Example* What is each player's best game?\n\n > r.table('games').group('player').max('points').run(conn)\n \n {\n \"Alice\": {\"id\": 5, \"player\": \"Alice\", \"points\": 7, \"type\": \"free\"},\n \"Bob\": {\"id\": 2, \"player\": \"Bob\", \"points\": 15, \"type\": \"ranked\"}\n }\n\nCommands chained onto grouped data will operate on each grouped datum,\nproducing more grouped data.\n\n*Example* What is the maximum number of points scored by each player?\n\n > 
r.table('games').group('player').max('points')['points'].run(conn)\n \n {\n \"Alice\": 7,\n \"Bob\": 15\n }\n\nYou can also group by more than one field.\n\n*Example* What is the maximum number of points scored by each\nplayer for each game type?\n\n > r.table('games').group('player', 'type').max('points')['points'].run(conn)\n \n {\n (\"Alice\", \"free\"): 7,\n (\"Bob\", \"free\"): 10,\n (\"Bob\", \"ranked\"): 15\n }\n\nYou can also group by a function.\n\n*Example* What is the maximum number of points scored by each\nplayer for each game type?\n\n > r.table('games')\n .group(lambda game:\n game.pluck('player', 'type')\n ).max('points')['points'].run(conn)\n \n {\n frozenset([('player', 'Alice'), ('type', 'free')]): 7,\n frozenset([('player', 'Bob'), ('type', 'free')]): 10,\n frozenset([('player', 'Bob'), ('type', 'ranked')]): 15,\n }\n\nUsing a function, you can also group by date on a ReQL [date field](http://rethinkdb.com/docs/dates-and-times/javascript/).\n\n*Example* How many matches have been played this year by month?\n\n > r.table('matches').group(\n lambda match: [match['date'].year(), match['date'].month()]\n ).count().run(conn)\n \n {\n (2014, 2): 2,\n (2014, 3): 2,\n (2014, 4): 1,\n (2014, 5): 3\n }\n\nYou can also group on an index (primary key or secondary).\n\n*Example* What is the maximum number of points scored by game type?\n\n > r.table('games').group(index='type').max('points')['points'].run(conn)\n \n {\n \"free\": 10,\n \"ranked\": 15\n }\n\nSuppose that the table `games2` has the following data:\n\n [\n { 'id': 1, 'matches': {'a': [1, 2, 3], 'b': [4, 5, 6]} },\n { 'id': 2, 'matches': {'b': [100], 'c': [7, 8, 9]} },\n { 'id': 3, 'matches': {'a': [10, 20], 'c': [70, 80]} }\n ]\n\nUsing the `multi` option we can group data by match A, B or C.\n\n > r.table('games2').group(r.row['matches'].keys(), multi=True).run(conn)\n \n [\n {\n 'group': 'a',\n 'reduction': [ <id 1>, <id 3> ]\n },\n {\n 'group': 'b',\n 'reduction': [ <id 1>, <id 2> ]\n },\n 
{\n 'group': 'c',\n 'reduction': [ <id 2>, <id 3> ]\n }\n ]\n\n(The full result set is abbreviated in the figure; `<id 1>, <id 2>` and `<id 3>` would be the entire documents matching those keys.)\n\n*Example* Use [map](http://rethinkdb.com/api/python/map) and [sum](http://rethinkdb.com/api/python/sum) to get the total points scored for each match.\n\n r.table('games2').group(r.row['matches'].keys(), multi=True).ungroup().map(\n lambda doc: { 'match': doc['group'], 'total': doc['reduction'].sum(\n lambda set: set['matches'][doc['group']].sum()\n )}).run(conn)\n \n [\n { 'match': 'a', 'total': 36 },\n { 'match': 'b', 'total': 115 },\n { 'match': 'c', 'total': 174 }\n ]\n\nThe inner `sum` adds the scores by match within each document; the outer `sum` adds those results together for a total across all the documents.\n\nIf you want to operate on all the groups rather than operating on each\ngroup (e.g. if you want to order the groups by their reduction), you\ncan use [ungroup](http://rethinkdb.com/api/python/ungroup/) to turn a grouped stream or\ngrouped data into an array of objects representing the groups.\n\n*Example* Ungrouping grouped data.\n\n > r.table('games').group('player').max('points')['points'].ungroup().run(conn)\n \n [\n {\n \"group\": \"Alice\",\n \"reduction\": 7\n },\n {\n \"group\": \"Bob\",\n \"reduction\": 15\n }\n ]\n\nUngrouping is useful e.g. for ordering grouped data, or for inserting\ngrouped data into a table.\n\n*Example* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\n > r.table('games').group('player').max('points')['points'].ungroup().order_by(\n r.desc('reduction')).run(conn)\n \n [\n {\n \"group\": \"Bob\",\n \"reduction\": 15\n },\n {\n \"group\": \"Alice\",\n \"reduction\": 7\n }\n ]\n\nWhen grouped data are returned to the client, they are transformed\ninto a client-specific native type. (Something similar is done with\n[times](http://rethinkdb.com/docs/dates-and-times/).) 
In Python, grouped data are\ntransformed into a `dictionary`. If the group value is an `array`, the\nkey is converted to a `tuple`. If the group value is a `dictionary`,\nit will be converted to a `frozenset`.\n\nIf you instead want to receive the raw\npseudotype from the server (e.g. if you're planning to serialize the\nresult as JSON), you can specify `group_format: 'raw'` as an optional\nargument to `run`:\n\n*Example* Get back the raw `GROUPED_DATA` pseudotype.\n\n > r.table('games').group('player').avg('points').run(conn, group_format='raw')\n \n {\n \"$reql_type$\": \"GROUPED_DATA\",\n \"data\": [\n [\"Alice\", 4.5],\n [\"Bob\", 12.5]\n ]\n }\n\nNot passing the `group_format` flag would return:\n\n {\n \"Alice\": 4.5,\n \"Bob\": 12.5\n }\n\nYou might also want to use the [ungroup](http://rethinkdb.com/api/python/ungroup/)\ncommand (see above), which will turn the grouped data into an array of\nobjects on the server.\n\nIf you run a query that returns a grouped stream, it will be\nautomatically converted to grouped data before being sent back to you\n(there is currently no efficient way to stream groups from RethinkDB).\nThis grouped data is subject to the array size limit (see [run](http://rethinkdb.com/api/python/run)).\n\nIn general, operations on grouped streams will be efficiently\ndistributed, and operations on grouped data won't be. You can figure\nout what you're working with by putting `type_of` on the end of your\nquery. Below are efficient and inefficient examples.\n\n*Example* Efficient operation.\n\n # r.table('games').group('player').type_of().run(conn)\n # Returns \"GROUPED_STREAM\"\n r.table('games').group('player').min('points').run(conn) # EFFICIENT\n\n*Example* Inefficient operation.\n\n # r.table('games').group('player').order_by('score').type_of().run(conn)\n # Returns \"GROUPED_DATA\"\n r.table('games').group('player').order_by('score').nth(0).run(conn) # INEFFICIENT\n\nWhat does it mean to be inefficient here? 
When operating on grouped\ndata rather than a grouped stream, *all* of the data has to be\navailable on the node processing the query. This means that the\noperation will only use one server's resources, and will require\nmemory proportional to the size of the grouped data it's operating\non. (In the case of the [order_by](http://rethinkdb.com/api/python/order_by/) in the inefficient example, that\nmeans memory proportional **to the size of the table**.) The array\nlimit is also enforced for grouped data, so the `order_by` example\nwould fail for tables with more than 100,000 rows unless you used the `array_limit` option with `run`.\n\n*Example* What is the maximum number of points scored by each\nplayer in free games?\n\n > r.table('games').filter(lambda game:\n game['type'] = 'free'\n ).group('player').max('points')['points'].run(conn)\n \n {\n \"Alice\": 7,\n \"Bob\": 10\n }\n\n*Example* What is each player's highest even and odd score?\n\n > r.table('games')\n .group('name', lambda game:\n game['points'] % 2\n ).max('points')['points'].run(conn)\n \n {\n (\"Alice\", 1): 7,\n (\"Bob\", 0): 10,\n (\"Bob\", 1): 15\n }\n",
),
(
rethinkdb.ast.RqlQuery.max,
b"sequence.max(field | function) -> element\nsequence.max(index=<indexname>) -> element\n\nFinds the maximum element of a sequence.\n\nThe `max` command can be called with:\n\n* a **field name**, to return the element of the sequence with the largest value in that field;\n* an **index** (the primary key or a secondary index), to return the element of the sequence with the largest value in that index;\n* a **function**, to apply the function to every element within the sequence and return the element which returns the largest value from the function, ignoring any elements where the function produces a non-existence error.\n\nFor more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nCalling `max` on an empty sequence will throw a non-existence error; this can be handled using the [default](http://rethinkdb.com/api/python/default/) command.\n\n*Example* Return the maximum value in the list `[3, 5, 7]`.\n\n r.expr([3, 5, 7]).max().run(conn)\n\n*Example* Return the user who has scored the most points.\n\n r.table('users').max('points').run(conn)\n\n*Example* The same as above, but using a secondary index on the `points` field.\n\n r.table('users').max(index='points').run(conn)\n\n*Example* Return the user who has scored the most points, adding in bonus points from a separate field using a function.\n\n r.table('users').max(lambda user:\n user['points'] + user['bonus_points']\n ).run(conn)\n\n*Example* Return the highest number of points any user has ever scored. This returns the value of that `points` field, not a document.\n\n r.table('users').max('points')['points'].run(conn)\n\n*Example* Return the user who has scored the most points, but add a default `None` return value to prevent an error if no user has ever scored points.\n\n r.table('users').max('points').default(None).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.min,
b"sequence.min(field | function) -> element\nsequence.min(index=<indexname>) -> element\n\nFinds the minimum element of a sequence.\n\nThe `min` command can be called with:\n\n* a **field name**, to return the element of the sequence with the smallest value in that field;\n* an **index** (the primary key or a secondary index), to return the element of the sequence with the smallest value in that index;\n* a **function**, to apply the function to every element within the sequence and return the element which returns the smallest value from the function, ignoring any elements where the function produces a non-existence error.\n\nFor more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nCalling `min` on an empty sequence will throw a non-existence error; this can be handled using the [default](http://rethinkdb.com/api/python/default/) command.\n\n*Example* Return the minimum value in the list `[3, 5, 7]`.\n\n r.expr([3, 5, 7]).min().run(conn)\n\n*Example* Return the user who has scored the fewest points.\n\n r.table('users').min('points').run(conn)\n\n*Example* The same as above, but using a secondary index on the `points` field.\n\n r.table('users').min(index='points').run(conn)\n\n*Example* Return the user who has scored the fewest points, adding in bonus points from a separate field using a function.\n\n r.table('users').min(lambda user:\n user['points'] + user['bonus_points']\n ).run(conn)\n\n*Example* Return the smallest number of points any user has ever scored. This returns the value of that `points` field, not a document.\n\n r.table('users').min('points')['points'].run(conn)\n\n*Example* Return the user who has scored the fewest points, but add a default `None` return value to prevent an error if no user has ever scored points.\n\n r.table('users').min('points').default(None).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.reduce,
b'sequence.reduce(function) -> value\n\nProduce a single value from a sequence through repeated application of a reduction function.\n\nThe reduction function can be called on:\n\n- two elements of the sequence\n- one element of the sequence and one result of a previous reduction\n- two results of previous reductions\n\nThe reduction function can be called on the results of two previous reductions because the\n`reduce` command is distributed and parallelized across shards and CPU cores. A common\nmistaken when using the `reduce` command is to suppose that the reduction is executed\nfrom left to right. Read the [map-reduce in RethinkDB](http://rethinkdb.com/docs/map-reduce/) article to\nsee an example.\n\nIf the sequence is empty, the server will produce a `ReqlRuntimeError` that can be\ncaught with `default`. \nIf the sequence has only one element, the first element will be returned.\n\n*Example* Return the number of documents in the table `posts`.\n\n r.table("posts").map(lambda doc: 1)\n .reduce(lambda left, right: left+right)\n .default(0).run(conn)\n\nA shorter way to execute this query is to use [count](http://rethinkdb.com/api/python/count).\n\n*Example* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the number of comments for all posts.\n\n r.table("posts").map(lambda doc:\n doc["comments"].count()\n ).reduce(lambda left, right:\n left+right\n ).default(0).run(conn)\n\n*Example* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the maximum number comments per post.\n\n r.table("posts").map(lambda doc:\n doc["comments"].count()\n ).reduce(lambda left, right:\n r.branch(\n left > right,\n left,\n right\n )\n ).default(0).run(conn)\n\nA shorter way to execute this query is to use [max](http://rethinkdb.com/api/python/max).\n',
),
(
rethinkdb.ast.RqlQuery.sum,
b"sequence.sum([field | function]) -> number\n\nSums all the elements of a sequence. If called with a field name,\nsums all the values of that field in the sequence, skipping elements\nof the sequence that lack that field. If called with a function,\ncalls that function on every element of the sequence and sums the\nresults, skipping elements of the sequence where that function returns\n`None` or a non-existence error.\n\nReturns `0` when called on an empty sequence.\n\n*Example* What's 3 + 5 + 7?\n\n r.expr([3, 5, 7]).sum().run(conn)\n\n*Example* How many points have been scored across all games?\n\n r.table('games').sum('points').run(conn)\n\n*Example* How many points have been scored across all games,\ncounting bonus points?\n\n r.table('games').sum(lambda game:\n game['points'] + game['bonus_points']\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.ungroup,
b'grouped_stream.ungroup() -> array\ngrouped_data.ungroup() -> array\n\nTakes a grouped stream or grouped data and turns it into an array of\nobjects representing the groups. Any commands chained after `ungroup`\nwill operate on this array, rather than operating on each group\nindividually. This is useful if you want to e.g. order the groups by\nthe value of their reduction.\n\nThe format of the array returned by `ungroup` is the same as the\ndefault native format of grouped data in the JavaScript driver and\ndata explorer.\n\nSuppose that the table `games` has the following data:\n\n [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ]\n\n*Example* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\n r.table(\'games\')\n .group(\'player\').max(\'points\')[\'points\']\n .ungroup().order_by(r.desc(\'reduction\')).run(conn)\n\n<!-- stop -->\n\nResult: \n\n [\n {\n "group": "Bob",\n "reduction": 15\n },\n {\n "group": "Alice",\n "reduction": 7\n }\n ]\n\n*Example* Select one random player and all their games.\n\n r.table(\'games\').group(\'player\').ungroup().sample(1).run(conn)\n\nResult:\n\n [\n {\n "group": "Bob",\n "reduction": [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n ]\n\nNote that if you didn\'t call `ungroup`, you would instead select one\nrandom game from each player:\n\n r.table(\'games\').group(\'player\').sample(1).run(conn)\n\nResult:\n\n {\n "Alice": [\n {"id": 5, "player": "Alice", "points": 7, "type": "free"}\n ],\n "Bob": [\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n\n*Example* Types!\n\n r.table(\'games\').group(\'player\').type_of().run(conn) # Returns "GROUPED_STREAM"\n 
r.table(\'games\').group(\'player\').ungroup().type_of().run(conn) # Returns "ARRAY"\n r.table(\'games\').group(\'player\').avg(\'points\').run(conn) # Returns "GROUPED_DATA"\n r.table(\'games\').group(\'player\').avg(\'points\').ungroup().run(conn) #Returns "ARRAY"\n',
),
(
rethinkdb.args,
b"r.args(array) -> special\n\n`r.args` is a special term that's used to splice an array of arguments\ninto another term. This is useful when you want to call a variadic\nterm such as [get_all](http://rethinkdb.com/api/python/get_all/) with a set of arguments produced at runtime.\n\nThis is analogous to unpacking argument lists in Python.\n\n*Example* Get Alice and Bob from the table `people`.\n\n r.table('people').get_all('Alice', 'Bob').run(conn)\n # or\n r.table('people').get_all(r.args(['Alice', 'Bob'])).run(conn)\n\n*Example* Get all of Alice's children from the table `people`.\n\n # r.table('people').get('Alice') returns {'id': 'Alice', 'children': ['Bob', 'Carol']}\n r.table('people').get_all(r.args(r.table('people').get('Alice')['children'])).run(conn)\n",
),
(
rethinkdb.binary,
b"r.binary(data) -> binary\n\nEncapsulate binary data within a query.\n\nThe type of data `binary` accepts depends on the client language. In Python, it expects a parameter of `bytes` type. Using a `bytes` object within a query implies the use of `binary` and the ReQL driver will automatically perform the coercion (in Python 3 only).\n\nBinary objects returned to the client in JavaScript will also be of the `bytes` type. This can be changed with the `binary_format` option provided to [run](http://rethinkdb.com/api/python/run) to return \"raw\" objects.\n\nOnly a limited subset of ReQL commands may be chained after `binary`:\n\n* [coerce_to](http://rethinkdb.com/api/python/coerce_to/) can coerce `binary` objects to `string` types\n* [count](http://rethinkdb.com/api/python/count/) will return the number of bytes in the object\n* [slice](http://rethinkdb.com/api/python/slice/) will treat bytes like array indexes (i.e., `slice(10,20)` will return bytes 10–19)\n* [type_of](http://rethinkdb.com/api/python/type_of) returns `PTYPE<BINARY>`\n* [info](http://rethinkdb.com/api/python/info) will return information on a binary object.\n\n*Example* Save an avatar image to a existing user record.\n\n f = open('./default_avatar.png', 'rb')\n avatar_image = f.read()\n f.close()\n r.table('users').get(100).update({'avatar': r.binary(avatar_image)}).run(conn)\n\n*Example* Get the size of an existing avatar image.\n\n r.table('users').get(100)['avatar'].count().run(conn)\n \n 14156\n\nRead more details about RethinkDB's binary object support: [Storing binary objects](http://rethinkdb.com/docs/storing-binary/).\n",
),
(
rethinkdb.branch,
b'r.branch(test, true_action[, test2, test2_action, ...], false_action) -> any\ntest.branch(true_action[, test2, test2_action, ...], false_action) -> any\n\nPerform a branching conditional equivalent to `if-then-else`.\n\nThe `branch` command takes 2n+1 arguments: pairs of conditional expressions and commands to be executed if the conditionals return any value but `False` or `None` (i.e., "truthy" values), with a final "else" command to be evaluated if all of the conditionals are `False` or `None`.\n\n<!-- break -->\n\nYou may call `branch` infix style on the first test. (See the second example for an illustration.)\n\nr.branch(test1, val1, test2, val2, elseval)\n\nis the equivalent of the Python statement\n\n if test1:\n return val1\n elif test2:\n return val2\n else:\n return elseval\n\n*Example* Test the value of x.\n\n x = 10\n r.branch((x > 5), \'big\', \'small\').run(conn)\n \n > "big"\n\n*Example* As above, infix-style.\n\n x = 10\n r.expr(x > 5).branch(\'big\', \'small\').run(conn)\n \n > "big"\n\n*Example* Categorize heroes by victory counts.\n\n r.table(\'marvel\').map(\n r.branch(\n r.row[\'victories\'] > 100,\n r.row[\'name\'] + \' is a superhero\',\n r.row[\'victories\'] > 10,\n r.row[\'name\'] + \' is a hero\',\n r.row[\'name\'] + \' is very nice\'\n )\n ).run(conn)\n\nIf the documents in the table `marvel` are:\n\n [\n { "name": "Iron Man", "victories": 214 },\n { "name": "Jubilee", "victories": 49 },\n { "name": "Slava", "victories": 5 }\n ]\n\nThe results will be:\n\n [\n "Iron Man is a superhero",\n "Jubilee is a hero",\n "Slava is very nice"\n ]\n',
),
(
rethinkdb.ast.RqlQuery.coerce_to,
b"sequence.coerce_to('array') -> array\nvalue.coerce_to('string') -> string\nstring.coerce_to('number') -> number\narray.coerce_to('object') -> object\nsequence.coerce_to('object') -> object\nobject.coerce_to('array') -> array\nbinary.coerce_to('string') -> string\nstring.coerce_to('binary') -> binary\n\nConvert a value of one type into another.\n\n* a sequence, selection or object can be coerced to an array\n* a sequence, selection or an array of key-value pairs can be coerced to an object\n* a string can be coerced to a number\n* any datum (single value) can be coerced to a string\n* a binary object can be coerced to a string and vice-versa\n\n*Example* Coerce a stream to an array to store its output in a field. (A stream cannot be stored in a field directly.)\n\n r.table('posts').map(lambda post: post.merge(\n { 'comments': r.table('comments').get_all(post['id'], index='post_id').coerce_to('array') }\n )).run(conn)\n\n*Example* Coerce an array of pairs into an object.\n\n r.expr([['name', 'Ironman'], ['victories', 2000]]).coerce_to('object').run(conn)\n\n__Note:__ To coerce a list of key-value pairs like `['name', 'Ironman', 'victories', 2000]` to an object, use the [object](http://rethinkdb.com/api/python/object) command.\n\n*Example* Coerce a number to a string.\n\n r.expr(1).coerce_to('string').run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.default,
b'value.default(default_value | function) -> any\nsequence.default(default_value | function) -> any\n\nProvide a default value in case of non-existence errors. The `default` command evaluates its first argument (the value it\'s chained to). If that argument returns `None` or a non-existence error is thrown in evaluation, then `default` returns its second argument. The second argument is usually a default value, but it can be a function that returns a value.\n\n*Example* Retrieve the titles and authors of the table `posts`.\nIn the case where the author field is missing or `None`, we want to retrieve the string\n`Anonymous`.\n\n r.table("posts").map(lambda post:\n {\n "title": post["title"],\n "author": post["author"].default("Anonymous")\n }\n ).run(conn)\n\n<!-- stop -->\n\nWe can rewrite the previous query with `r.branch` too.\n\n r.table("posts").map(lambda post:\n r.branch(\n post.has_fields("author"),\n {\n "title": post["title"],\n "author": post["author"]\n },\n {\n "title": post["title"],\n "author": "Anonymous" \n }\n )\n ).run(conn)\n\n*Example* The `default` command can also be used to filter documents. Retrieve all our users who are not grown-ups or whose age is unknown\n(i.e., the field `age` is missing or equals `None`).\n\n r.table("users").filter(lambda user:\n (user["age"] < 18).default(True)\n ).run(conn)\n\nOne more way to write the previous query is to set the age to be `-1` when the\nfield is missing.\n\n r.table("users").filter(lambda user:\n user["age"].default(-1) < 18\n ).run(conn)\n\nThis can be accomplished with [has_fields](http://rethinkdb.com/api/python/has_fields/) rather than `default`.\n\n r.table("users").filter(lambda user:\n user.has_fields("age").not_() | (user["age"] < 18)\n ).run(conn)\n\nThe body of every [filter](http://rethinkdb.com/api/python/filter/) is wrapped in an implicit `.default(False)`. 
You can overwrite the value `False` with the `default` option.\n\n r.table("users").filter(\n lambda user: (user["age"] < 18).default(True),\n default=True\n ).run(conn)\n\n*Example* The function form of `default` receives the error message as its argument.\n\n r.table("posts").map(lambda post:\n {\n "title": post["title"],\n "author": post["author"].default(lambda err: err)\n }\n ).run(conn)\n\nThis particular example simply returns the error message, so it isn\'t very useful. But it would be possible to change the default value based on the specific error message thrown.\n',
),
(
rethinkdb.ast.RqlQuery.do,
b"any.do(function) -> any\nr.do([args]*, function) -> any\nany.do(expr) -> any\nr.do([args]*, expr) -> any\n\nCall an anonymous function using return values from other ReQL commands or queries as arguments.\n\nThe last argument to `do` (or, in some forms, the only argument) is an expression or an anonymous function which receives values from either the previous arguments or from prefixed commands chained before `do`. The `do` command is essentially a single-element [map](http://rethinkdb.com/api/python/map/), letting you map a function over just one document. This allows you to bind a query result to a local variable within the scope of `do`, letting you compute the result just once and reuse it in a complex expression or in a series of ReQL commands.\n\nArguments passed to the `do` function must be basic data types, and cannot be streams or selections. (Read about [ReQL data types](http://rethinkdb.com/docs/data-types/).) While the arguments will all be evaluated before the function is executed, they may be evaluated in any order, so their values should not be dependent on one another. The type of `do`'s result is the type of the value returned from the function or last expression.\n\n*Example* Compute a golfer's net score for a game.\n\n r.table('players').get('86be93eb-a112-48f5-a829-15b2cb49de1d').do(\n lambda player: player['gross_score'] - player['course_handicap']\n ).run(conn)\n\n*Example* Return the name of the best scoring player in a two-player golf match.\n\n r.do(r.table('players').get(id1), r.table('players').get(id2),\n (lambda player1, player2:\n r.branch(player1['gross_score'].lt(player2['gross_score']),\n player1, player2))\n ).run(conn)\n\nNote that `branch`, the ReQL conditional command, must be used instead of `if`. 
See the `branch` [documentation](http://rethinkdb.com/api/python/branch) for more.\n\n*Example* Take different actions based on the result of a ReQL [insert](http://rethinkdb.com/api/python/insert) command.\n\n new_data = {\n 'id': 100,\n 'name': 'Agatha',\n 'gross_score': 57,\n 'course_handicap': 4\n }\n r.table('players').insert(new_data).do(lambda doc:\n r.branch((doc['inserted'] != 0),\n r.table('log').insert({'time': r.now(), 'response': doc, 'result': 'ok'}),\n r.table('log').insert({'time': r.now(), 'response': doc, 'result': 'error'}))\n ).run(conn)\n",
),
(
rethinkdb.error,
b"r.error(message) -> error\n\nThrow a runtime error. If called with no arguments inside the second argument to `default`, re-throw the current error.\n\n*Example* Iron Man can't possibly have lost a battle:\n\n r.table('marvel').get('IronMan').do(\n lambda ironman: r.branch(ironman['victories'] < ironman['battles'],\n r.error('impossible code path'),\n ironman)\n ).run(conn)\n\n",
),
(
rethinkdb.expr,
b"r.expr(value) -> value\n\nConstruct a ReQL JSON object from a native object.\n\nIf the native object is of the `bytes` type, then `expr` will return a binary object. See [binary](http://rethinkdb.com/api/python/binary) for more information.\n\n*Example* Objects wrapped with `expr` can then be manipulated by ReQL API functions.\n\n r.expr({'a':'b'}).merge({'b':[1,2,3]}).run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.for_each,
b"sequence.for_each(write_function) -> object\n\nLoop over a sequence, evaluating the given write query for each element.\n\n*Example* Now that our heroes have defeated their villains, we can safely remove them from the villain table.\n\n r.table('marvel').for_each(\n lambda hero: r.table('villains').get(hero['villainDefeated']).delete()\n ).run(conn)\n\n",
),
(
rethinkdb.http,
b"r.http(url[, options]) -> value\nr.http(url[, options]) -> stream\n\nRetrieve data from the specified URL over HTTP. The return type depends on the `result_format` option, which checks the `Content-Type` of the response by default.\n\n*Example* Perform an HTTP `GET` and store the result in a table.\n\n r.table('posts').insert(r.http('http://httpbin.org/get')).run(conn)\n\n<!-- stop -->\n\nSee [the tutorial](http://rethinkdb.com/docs/external-api-access/) on `r.http` for more examples on how to use this command.\n\n* `timeout`: timeout period in seconds to wait before aborting the connect (default `30`).\n* `attempts`: number of retry attempts to make after failed connections (default `5`).\n* `redirects`: number of redirect and location headers to follow (default `1`).\n* `verify`: if `true`, verify the server's SSL certificate (default `true`).\n* `result_format`: string specifying the format to return results in. One of the following:\n * `text`: always return a string.\n * `json`: parse the result as JSON, raising an error on failure.\n * `jsonp`: parse the result as Padded JSON.\n * `binary`: return a binary object.\n * `auto`: parse the result based on its `Content-Type` (the default):\n * `application/json`: as `json`\n * `application/json-p`, `text/json-p`, `text/javascript`: as `jsonp`\n * `audio/*`, `video/*`, `image/*`, `application/octet-stream`: as `binary`\n * anything else: as `text`\n\n* `method`: HTTP method to use for the request. One of `GET`, `POST`, `PUT`, `PATCH`, `DELETE` or `HEAD`. Default: `GET`.\n* `auth`: object giving authentication, with the following fields:\n * `type`: `basic` (default) or `digest`\n * `user`: username\n * `pass`: password in plain text\n* `params`: object specifying URL parameters to append to the URL as encoded key/value pairs. `{ 'query': 'banana', 'limit': 2 }` will be appended as `?query=banana&limit=2`. Default: no parameters.\n* `header`: Extra header lines to include. 
The value may be an array of strings or an object. Default: `Accept-Encoding: deflate;q=1, gzip;q=0.5` and `User-Agent: RethinkDB/<VERSION>`.\n* `data`: Data to send to the server on a `POST`, `PUT`, `PATCH`, or `DELETE` request. For `POST` requests, data may be either an object (which will be written to the body as form-encoded key/value pairs) or a string; for all other requests, data will be serialized as JSON and placed in the request body, sent as `Content-Type: application/json`. Default: no data will be sent.\n\n*Example* Perform multiple requests with different parameters.\n\n r.expr([1, 2, 3]).map(\n lambda i: r.http('http://httpbin.org/get', params={'user': i})\n ).run(conn)\n\n*Example* Perform a `PUT` request for each item in a table.\n\n r.table('data').map(\n lambda row: r.http('http://httpbin.org/put', method='PUT', data=row)\n ).run(conn)\n\n*Example* Perform a `POST` request with accompanying data.\n\nUsing form-encoded data:\n\n r.http('http://httpbin.org/post', method='POST',\n data={'player': 'Bob', 'game': 'tic tac toe'}\n ).run(conn)\n\nUsing JSON data:\n\n r.http('http://httpbin.org/post', method='POST',\n data=r.expr(value).coerce_to('string'),\n header={'Content-Type': 'application/json'}\n ).run(conn)\n\n`r.http` supports depagination, which will request multiple pages in a row and aggregate the results into a stream. The use of this feature is controlled by the optional arguments `page` and `page_limit`. Either none or both of these arguments must be provided.\n\n* `page`: This option may specify either a built-in pagination strategy (see below), or a function to provide the next URL and/or `params` to request.\n* `page_limit`: An integer specifying the maximum number of requests to issue using the `page` functionality. 
This is to prevent overuse of API quotas, and must be specified with `page`.\n * `-1`: no limit\n * `0`: no requests will be made, an empty stream will be returned\n * `n`: `n` requests will be made\n\nAt the moment, the only built-in strategy is `'link-next'`, which is equivalent to `lambda info: info'header'['rel=\"next\"'].default(None)`.\n\n*Example* Perform a GitHub search and collect up to 3 pages of results.\n\n r.http(\"https://api.github.com/search/code?q=addClass+user:mozilla\",\n page='link-next', page_limit=3).run(conn)\n\nAs a function, `page` takes one parameter, an object of the format:\n\n {\n 'params': object, # the URL parameters used in the last request\n 'header': object, # the HTTP headers of the last response as key/value pairs\n 'body': value # the body of the last response in the format specified by `result_format`\n }\n\nThe `header` field will be a parsed version of the header with fields lowercased, like so:\n\n {\n 'content-length': '1024',\n 'content-type': 'application/json',\n 'date': 'Thu, 1 Jan 1970 00:00:00 GMT',\n 'link': {\n 'rel=\"last\"': 'http://example.com/?page=34',\n 'rel=\"next\"': 'http://example.com/?page=2'\n }\n }\n\nThe `page` function may return a string corresponding to the next URL to request, `None` indicating that there is no more to get, or an object of the format:\n\n {\n 'url': string, # the next URL to request, or None for no more pages\n 'params': object # new URL parameters to use, will be merged with the previous request's params\n }\n\n*Example* Perform depagination with a custom `page` function.\n\n r.http('example.com/pages',\n page=(lambda info: info['body']['meta']['next'].default(None)),\n page_limit=5\n ).run(conn)\n\n# Learn more\n\nSee [the tutorial](http://rethinkdb.com/docs/external-api-access/) on `r.http` for more examples on how to use this command.\n",
),
(
rethinkdb.ast.RqlQuery.info,
b"any.info() -> object\nr.info(any) -> object\n\nGet information about a ReQL value.\n\n*Example* Get information about a table such as primary key, or cache size.\n\n r.table('marvel').info().run(conn)\n\n",
),
(
rethinkdb.js,
b"r.js(js_string[, timeout=<number>]) -> value\n\nCreate a javascript expression.\n\n*Example* Concatenate two strings using JavaScript.\n\n`timeout` is the number of seconds before `r.js` times out. The default value is 5 seconds.\n\n r.js(\"'str1' + 'str2'\").run(conn)\n\n*Example* Select all documents where the 'magazines' field is greater than 5 by running JavaScript on the server.\n\n r.table('marvel').filter(\n r.js('(function (row) { return row.magazines.length > 5; })')\n ).run(conn)\n\n*Example* You may also specify a timeout in seconds (defaults to 5).\n\n r.js('while(true) {}', timeout=1.3).run(conn)\n\n",
),
(
rethinkdb.json,
b'r.json(json_string) -> value\n\nParse a JSON string on the server.\n\n*Example* Send an array to the server.\n\n r.json("[1,2,3]").run(conn)\n\n',
),
(
rethinkdb.range,
b'r.range() -> stream\nr.range([start_value, ]end_value) -> stream\n\nGenerate a stream of sequential integers in a specified range.\n\n`range` takes 0, 1 or 2 arguments:\n\n* With no arguments, `range` returns an "infinite" stream from 0 up to and including the maximum integer value;\n* With one argument, `range` returns a stream from 0 up to but not including the end value;\n* With two arguments, `range` returns a stream from the start value up to but not including the end value.\n\nNote that the left bound (including the implied left bound of 0 in the 0- and 1-argument form) is always closed and the right bound is always open: the start value will always be included in the returned range and the end value will *not* be included in the returned range.\n\nAny specified arguments must be integers, or a `ReqlRuntimeError` will be thrown. If the start value is equal or to higher than the end value, no error will be thrown but a zero-element stream will be returned.\n\n*Example* Return a four-element range of `[0, 1, 2, 3]`.\n\n > r.range(4).run(conn)\n \n [0, 1, 2, 3]\n\n<!-- stop -->\n\nYou can also use the [limit](http://rethinkdb.com/api/python/limit) command with the no-argument variant to achieve the same result in this case:\n\n > r.range().limit(4).run(conn)\n \n [0, 1, 2, 3]\n\n*Example* Return a range from -5 through 5.\n\n > r.range(-5, 6).run(conn)\n \n [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]\n',
),
(
rethinkdb.ast.RqlQuery.to_json_string,
b'value.to_json_string() -> string\nvalue.to_json() -> string\n\nConvert a ReQL value or object to a JSON string. You may use either `to_json_string` or `to_json`.\n\n*Example* Get a ReQL document as a JSON string.\n\n > r.table(\'hero\').get(1).to_json()\n \n \'{"id": 1, "name": "Batman", "city": "Gotham", "powers": ["martial arts", "cinematic entrances"]}\'\n',
),
(
rethinkdb.ast.RqlQuery.to_json,
b'value.to_json_string() -> string\nvalue.to_json() -> string\n\nConvert a ReQL value or object to a JSON string. You may use either `to_json_string` or `to_json`.\n\n*Example* Get a ReQL document as a JSON string.\n\n > r.table(\'hero\').get(1).to_json()\n \n \'{"id": 1, "name": "Batman", "city": "Gotham", "powers": ["martial arts", "cinematic entrances"]}\'\n',
),
(
rethinkdb.ast.RqlQuery.type_of,
b'any.type_of() -> string\n\nGets the type of a ReQL query\'s return value.\n\nThe type will be returned as a string:\n\n* `ARRAY`\n* `BOOL`\n* `DB`\n* `FUNCTION`\n* `GROUPED_DATA`\n* `GROUPED_STREAM`\n* `MAXVAL`\n* `MINVAL`\n* `NULL`\n* `NUMBER`\n* `OBJECT`\n* `PTYPE<BINARY>`\n* `PTYPE<GEOMETRY>`\n* `PTYPE<TIME>`\n* `SELECTION<ARRAY>`\n* `SELECTION<OBJECT>`\n* `SELECTION<STREAM>`\n* `STREAM`\n* `STRING`\n* `TABLE_SLICE`\n* `TABLE`\n\nRead the article on [ReQL data types](docs/data-types/) for a more detailed discussion. Note that some possible return values from `type_of` are internal values, such as `MAXVAL`, and unlikely to be returned from queries in standard practice.\n\n*Example* Get the type of a string.\n\n > r.expr("foo").type_of().run(conn)\n "STRING"\n\n',
),
(
rethinkdb.uuid,
b'r.uuid([string]) -> string\n\nReturn a UUID (universally unique identifier), a string that can be used as a unique ID. If a string is passed to `uuid` as an argument, the UUID will be deterministic, derived from the string\'s SHA-1 hash.\n\nRethinkDB\'s UUIDs are standards-compliant. Without the optional argument, a version 4 random UUID will be generated; with that argument, a version 5 UUID will be generated, using a fixed namespace UUID of `91461c99-f89d-49d2-af96-d8e2e14e9b58`. For more information, read Wikipedia\'s UUID article.\n\n*Example* Generate a UUID.\n\n > r.uuid().run(conn)\n \n "27961a0e-f4e8-4eb3-bf95-c5203e1d87b9"\n\n*Example* Generate a UUID based on a string.\n\n > r.uuid("slava@example.com").run(conn)\n \n "90691cbc-b5ea-5826-ae98-951e30fc3b2d"\n',
),
(
rethinkdb.net.Cursor.close,
b"cursor.close()\n\nClose a cursor. Closing a cursor cancels the corresponding query and frees the memory\nassociated with the open request.\n\n*Example* Close a cursor.\n\n cursor.close()\n",
),
(
rethinkdb.net.Cursor.next,
b"cursor.next([wait=True])\n\nGet the next element in the cursor.\n\nThe optional `wait` argument specifies whether to wait for the next available element and how long to wait:\n\n* `True`: Wait indefinitely (the default).\n* `False`: Do not wait at all. If data is immediately available, it will be returned; if it is not available, a `ReqlTimeoutError` will be raised.\n* number: Wait up to the specified number of seconds for data to be available before raising `ReqlTimeoutError`.\n\nThe behavior of `next` will be identical with `False`, `None` or the number `0`.\n\nCalling `next` the first time on a cursor provides the first element of the cursor. If the data set is exhausted (e.g., you have retrieved all the documents in a table), a `ReqlCursorEmpty` error will be raised when `next` is called.\n\n*Example* Retrieve the next element.\n\n cursor = r.table('superheroes').run(conn)\n doc = cursor.next()\n\n*Example* Retrieve the next element on a [changefeed](http://rethinkdb.com/docs/changefeeds/python), waiting up to five seconds.\n\n cursor = r.table('superheroes').changes().run(conn)\n doc = cursor.next(wait=5)\n\n__Note:__ RethinkDB sequences can be iterated through via the Python Iterable interface. The canonical way to retrieve all the results is to use a [for...in](../each/) loop or [list()](../to_array/).\n\n",
),
(
rethinkdb.ast.RqlQuery.date,
b'time.date() -> time\n\nReturn a new time object only based on the day, month and year (ie. the same day at 00:00).\n\n*Example* Retrieve all the users whose birthday is today.\n\n r.table("users").filter(lambda user:\n user["birthdate"].date() == r.now().date()\n ).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.day,
b'time.day() -> number\n\nReturn the day of a time object as a number between 1 and 31.\n\n*Example* Return the users born on the 24th of any month.\n\n r.table("users").filter(\n r.row["birthdate"].day() == 24\n )\n\n',
),
(
rethinkdb.ast.RqlQuery.day_of_week,
b'time.day_of_week() -> number\n\nReturn the day of week of a time object as a number between 1 and 7 (following ISO 8601 standard). For your convenience, the terms r.monday, r.tuesday etc. are defined and map to the appropriate integer.\n\n*Example* Return today\'s day of week.\n\n r.now().day_of_week().run(conn)\n\n*Example* Retrieve all the users who were born on a Tuesday.\n\n r.table("users").filter( lambda user:\n user["birthdate"].day_of_week().eq(r.tuesday)\n )\n\n',
),
(
rethinkdb.ast.RqlQuery.day_of_year,
b'time.day_of_year() -> number\n\nReturn the day of the year of a time object as a number between 1 and 366 (following ISO 8601 standard).\n\n*Example* Retrieve all the users who were born the first day of a year.\n\n r.table("users").filter(\n r.row["birthdate"].day_of_year() == 1\n ).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.during,
b'time.during(start_time, end_time[, left_bound="closed", right_bound="open"])\n -> bool\n\nReturn whether a time is between two other times.\n\nBy default, this is inclusive of the start time and exclusive of the end time. Set `left_bound` and `right_bound` to explicitly include (`closed`) or exclude (`open`) that endpoint of the range.\n\n*Example* Retrieve all the posts that were posted between December 1st, 2013 (inclusive) and December 10th, 2013 (exclusive).\n\n r.table("posts").filter(\n r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"))\n ).run(conn)\n\n*Example* Retrieve all the posts that were posted between December 1st, 2013 (exclusive) and December 10th, 2013 (inclusive).\n\n r.table("posts").filter(\n r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"), left_bound="open", right_bound="closed")\n ).run(conn)\n\n',
),
(
rethinkdb.epoch_time,
b'r.epoch_time(number) -> time\n\nCreate a time object based on seconds since epoch. The first argument is a double and\nwill be rounded to three decimal places (millisecond-precision).\n\n*Example* Update the birthdate of the user "John" to November 3rd, 1986.\n\n r.table("user").get("John").update({"birthdate": r.epoch_time(531360000)}).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.hours,
b'time.hours() -> number\n\nReturn the hour in a time object as a number between 0 and 23.\n\n*Example* Return all the posts submitted after midnight and before 4am.\n\n r.table("posts").filter(lambda post:\n post["date"].hours() < 4\n ).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.in_timezone,
b"time.in_timezone(timezone) -> time\n\nReturn a new time object with a different timezone. While the time stays the same, the results returned by methods such as hours() will change since they take the timezone into account. The timezone argument has to be of the ISO 8601 format.\n\n*Example* Hour of the day in San Francisco (UTC/GMT -8, without daylight saving time).\n\n r.now().in_timezone('-08:00').hours().run(conn)\n",
),
(
rethinkdb.iso8601,
b"r.iso8601(string[, default_timezone='']) -> time\n\nCreate a time object based on an ISO 8601 date-time string (e.g. '2013-01-01T01:01:01+00:00'). RethinkDB supports all valid ISO 8601 formats except for week dates. Read more about the ISO 8601 format at [Wikipedia](http://en.wikipedia.org/wiki/ISO_8601).\n\nIf you pass an ISO 8601 string without a time zone, you must specify the time zone with the `default_timezone` argument.\n\n*Example* Update the time of John's birth.\n\n r.table(\"user\").get(\"John\").update({\"birth\": r.iso8601('1986-11-03T08:30:00-07:00')}).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.minutes,
b'time.minutes() -> number\n\nReturn the minute in a time object as a number between 0 and 59.\n\n*Example* Return all the posts submitted during the first 10 minutes of every hour.\n\n r.table("posts").filter(lambda post:\n post["date"].minutes() < 10\n ).run(conn)\n',
),
(
rethinkdb.ast.RqlQuery.month,
b'time.month() -> number\n\nReturn the month of a time object as a number between 1 and 12. For your convenience, the terms r.january, r.february etc. are defined and map to the appropriate integer.\n\n*Example* Retrieve all the users who were born in November.\n\n r.table("users").filter(\n r.row["birthdate"].month() == 11\n )\n\n*Example* Retrieve all the users who were born in November.\n\n r.table("users").filter(\n r.row["birthdate"].month() == r.november\n )\n\n',
),
(
rethinkdb.now,
b'r.now() -> time\n\nReturn a time object representing the current time in UTC. The command now() is computed once when the server receives the query, so multiple instances of r.now() will always return the same time inside a query.\n\n*Example* Add a new user with the time at which he subscribed.\n\n r.table("users").insert({\n "name": "John",\n "subscription_date": r.now()\n }).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.seconds,
b'time.seconds() -> number\n\nReturn the seconds in a time object as a number between 0 and 59.999 (double precision).\n\n*Example* Return the post submitted during the first 30 seconds of every minute.\n\n r.table("posts").filter(lambda post:\n post["date"].seconds() < 30\n ).run(conn)\n\n',
),
(
rethinkdb.time,
b'r.time(year, month, day[, hour, minute, second], timezone)\n -> time\n\nCreate a time object for a specific time.\n\nA few restrictions exist on the arguments:\n\n- `year` is an integer between 1400 and 9,999.\n- `month` is an integer between 1 and 12.\n- `day` is an integer between 1 and 31.\n- `hour` is an integer.\n- `minutes` is an integer.\n- `seconds` is a double. Its value will be rounded to three decimal places\n(millisecond-precision).\n- `timezone` can be `\'Z\'` (for UTC) or a string with the format `\xc2\xb1[hh]:[mm]`.\n\n*Example* Update the birthdate of the user "John" to November 3rd, 1986 UTC.\n\n r.table("user").get("John").update({"birthdate": r.time(1986, 11, 3, \'Z\')}).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.time_of_day,
b'time.time_of_day() -> number\n\nReturn the number of seconds elapsed since the beginning of the day stored in the time object.\n\n*Example* Retrieve posts that were submitted before noon.\n\n r.table("posts").filter(\n r.row["date"].time_of_day() <= 12*60*60\n ).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.timezone,
b'time.timezone() -> string\n\nReturn the timezone of the time object.\n\n*Example* Return all the users in the "-07:00" timezone.\n\n r.table("users").filter(lambda user:\n user["subscriptionDate"].timezone() == "-07:00"\n )\n\n',
),
(
rethinkdb.ast.RqlQuery.to_epoch_time,
b"time.to_epoch_time() -> number\n\nConvert a time object to its epoch time.\n\n*Example* Return the current time in seconds since the Unix Epoch with millisecond-precision.\n\n r.now().to_epoch_time()\n\n",
),
(
rethinkdb.ast.RqlQuery.to_iso8601,
b'time.to_iso8601() -> string\n\nConvert a time object to a string in ISO 8601 format.\n\n*Example* Return the current ISO 8601 time.\n\n > r.now().to_iso8601().run(conn)\n \n "2015-04-20T18:37:52.690+00:00"\n\n',
),
(
rethinkdb.ast.RqlQuery.year,
b'time.year() -> number\n\nReturn the year of a time object.\n\n*Example* Retrieve all the users born in 1986.\n\n r.table("users").filter(lambda user:\n user["birthdate"].year() == 1986\n ).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.append,
b"array.append(value) -> array\n\nAppend a value to an array.\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].append('newBoots').run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.__getitem__,
b"sequence[attr] -> sequence\nsingleSelection[attr] -> value\nobject[attr] -> value\narray[index] -> value\n\nGet a single field from an object. If called on a sequence, gets that field from every object in the sequence, skipping objects that lack it.\n\n*Example* What was Iron Man's first appearance in a comic?\n\n r.table('marvel').get('IronMan')['firstAppearance'].run(conn)\n\n<!-- stop -->\n\nThe `[]` command also accepts integer arguments as array offsets, like the [nth](http://rethinkdb.com/api/python/nth) command.\n\n*Example* Get the fourth element in a sequence. (The first element is position `0`, so the fourth element is position `3`.)\n\n r.expr([10, 20, 30, 40, 50])[3]\n \n 40\n",
),
(
rethinkdb.ast.RqlQuery.change_at,
b'array.change_at(index, value) -> array\n\nChange a value in an array at a given index. Returns the modified array.\n\n*Example* Bruce Banner hulks out.\n\n r.expr(["Iron Man", "Bruce", "Spider-Man"]).change_at(1, "Hulk").run(conn)\n',
),
(
rethinkdb.ast.RqlQuery.delete_at,
b"array.delete_at(index [,endIndex]) -> array\n\nRemove one or more elements from an array at a given index. Returns the modified array. (Note: `delete_at` operates on arrays, not documents; to delete documents, see the [delete](http://rethinkdb.com/api/python/delete) command.)\n\nIf only `index` is specified, `delete_at` removes the element at that index. If both `index` and `end_index` are specified, `delete_at` removes the range of elements between `index` and `end_index`, inclusive of `index` but not inclusive of `end_index`.\n\nIf `end_index` is specified, it must not be less than `index`. Both `index` and `end_index` must be within the array's bounds (i.e., if the array has 10 elements, an `index` or `end_index` of 10 or higher is invalid).\n\nBy using a negative `index` you can delete from the end of the array. `-1` is the last element in the array, `-2` is the second-to-last element, and so on. You may specify a negative `end_index`, although just as with a positive value, this will not be inclusive. 
The range `(2,-1)` specifies the third element through the next-to-last element.\n\n*Example* Delete the second element of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(1).run(conn)\n \n ['a', 'c', 'd', 'e', 'f']\n\n*Example* Delete the second and third elements of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(1,3).run(conn)\n \n ['a', 'd', 'e', 'f']\n\n*Example* Delete the next-to-last element of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(-2).run(conn)\n \n ['a', 'b', 'c', 'd', 'f']\n\n*Example* Delete a comment on a post.\n\nGiven a post document such as:\n\n{\n id: '4cf47834-b6f9-438f-9dec-74087e84eb63',\n title: 'Post title',\n author: 'Bob',\n comments: [\n { author: 'Agatha', text: 'Comment 1' },\n { author: 'Fred', text: 'Comment 2' }\n ]\n}\n\nThe second comment can be deleted by using `update` and `delete_at` together.\n\n r.table('posts').get('4cf47834-b6f9-438f-9dec-74087e84eb63').update(\n lambda post: { 'comments': post['comments'].delete_at(1) }\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.difference,
b"array.difference(array) -> array\n\nRemove the elements of one array from another array.\n\n*Example* Retrieve Iron Man's equipment list without boots.\n\n r.table('marvel').get('IronMan')['equipment'].difference(['Boots']).run(conn)\n\n*Example* Remove Iron Man's boots from his equipment.\n\n r.table('marvel').get('IronMan')['equipment'].update(lambda doc:\n {'equipment': doc['equipment'].difference(['Boots'])}\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.get_field,
b"sequence.get_field(attr) -> sequence\nsingleSelection.get_field(attr) -> value\nobject.get_field(attr) -> value\n\nGet a single field from an object. If called on a sequence, gets that field from every\nobject in the sequence, skipping objects that lack it.\n\n*Example* What was Iron Man's first appearance in a comic?\n\n r.table('marvel').get('IronMan').get_field('firstAppearance').run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.has_fields,
b"sequence.has_fields([selector1, selector2...]) -> stream\narray.has_fields([selector1, selector2...]) -> array\nobject.has_fields([selector1, selector2...]) -> boolean\n\nTest if an object has one or more fields. An object has a field if it has that key and the key has a non-null value. For instance, the object `{'a': 1,'b': 2,'c': null}` has the fields `a` and `b`.\n\nWhen applied to a single object, `has_fields` returns `true` if the object has the fields and `false` if it does not. When applied to a sequence, it will return a new sequence (an array or stream) containing the elements that have the specified fields.\n\n*Example* Return the players who have won games.\n\n r.table('players').has_fields('games_won').run(conn)\n\n*Example* Return the players who have *not* won games. To do this, use `has_fields` with [not](http://rethinkdb.com/api/python/not), wrapped with [filter](http://rethinkdb.com/api/python/filter).\n\n r.table('players').filter(~r.row.has_fields('games_won')).run(conn)\n\n*Example* Test if a specific player has won any games.\n\n r.table('players').get(\n 'b5ec9714-837e-400c-aa74-dbd35c9a7c4c').has_fields('games_won').run(conn)\n\n**Nested Fields**\n\n`has_fields` lets you test for nested fields in objects. If the value of a field is itself a set of key/value pairs, you can test for the presence of specific keys.\n\n*Example* In the `players` table, the `games_won` field contains one or more fields for kinds of games won:\n\n {\n 'games_won': {\n 'playoffs': 2,\n 'championships': 1\n }\n }\n\nReturn players who have the \"championships\" field.\n\n r.table('players').has_fields({'games_won': {'championships': True}}).run(conn)\n\nNote that `True` in the example above is testing for the existence of `championships` as a field, not testing to see if the value of the `championships` field is set to `true`. There's a more convenient shorthand form available. 
(See [pluck](http://rethinkdb.com/api/python/pluck) for more details on this.)\n\n r.table('players').has_fields({'games_won': 'championships'}).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.insert_at,
b'array.insert_at(index, value) -> array\n\nInsert a value in to an array at a given index. Returns the modified array.\n\n*Example* Hulk decides to join the avengers.\n\n r.expr(["Iron Man", "Spider-Man"]).insert_at(1, "Hulk").run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.keys,
b'singleSelection.keys() -> array\nobject.keys() -> array\n\nReturn an array containing all of an object\'s keys. Note that the keys will be sorted as described in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order) (for strings, lexicographically).\n\n*Example* Get all the keys from a table row.\n\n # row: { "id": 1, "mail": "fred@example.com", "name": "fred" }\n \n r.table(\'users\').get(1).keys().run(conn)\n \n > [ "id", "mail", "name" ]\n',
),
(
rethinkdb.literal,
b'r.literal(object) -> special\n\nReplace an object in a field instead of merging it with an existing object in a `merge` or `update` operation. = Using `literal` with no arguments in a `merge` or `update` operation will remove the corresponding field.\n\nAssume your users table has this structure:\n\n [\n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 18,\n "city": "Dallas"\n }\n } \n ...\n ]\n\nUsing `update` to modify the `data` field will normally merge the nested documents:\n\n r.table(\'users\').get(1).update({ \'data\': { \'age\': 19, \'job\': \'Engineer\' } }).run(conn)\n \n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 19,\n "city": "Dallas",\n "job": "Engineer"\n }\n } \n\nThat will preserve `city` and other existing fields. But to replace the entire `data` document with a new object, use `literal`.\n\n*Example* Replace one nested document with another rather than merging the fields.\n\n r.table(\'users\').get(1).update({ \'data\': r.literal({ \'age\': 19, \'job\': \'Engineer\' }) }).run(conn)\n \n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 19,\n "job": "Engineer"\n }\n } \n\n*Example* Use `literal` to remove a field from a document.\n\n r.table(\'users\').get(1).merge({ "data": r.literal() }).run(conn)\n \n {\n "id": 1,\n "name": "Alice"\n }\n',
),
(
rethinkdb.ast.RqlQuery.merge,
b"singleSelection.merge([object | function, object | function, ...]) -> object\nobject.merge([object | function, object | function, ...]) -> object\nsequence.merge([object | function, object | function, ...]) -> stream\narray.merge([object | function, object | function, ...]) -> array\n\nMerge two or more objects together to construct a new object with properties from all. When there is a conflict between field names, preference is given to fields in the rightmost object in the argument list `merge` also accepts a subquery function that returns an object, which will be used similarly to a [map](http://rethinkdb.com/api/python/map/) function.\n\n*Example* Equip Thor for battle.\n\n r.table('marvel').get('thor').merge(\n r.table('equipment').get('hammer'),\n r.table('equipment').get('pimento_sandwich')\n ).run(conn)\n\n*Example* Equip every hero for battle, using a subquery function to retrieve their weapons.\n\n r.table('marvel').merge(lambda hero:\n { 'weapons': r.table('weapons').get(hero['weapon_id']) }\n ).run(conn)\n\n*Example* Use `merge` to join each blog post with its comments.\n\nNote that the sequence being merged—in this example, the comments—must be coerced from a selection to an array. 
Without `coerce_to` the operation will throw an error (\"Expected type DATUM but found SELECTION\").\n\n r.table('posts').merge(lambda post:\n { 'comments': r.table('comments').get_all(post['id'],\n index='post_id').coerce_to('array') }\n ).run(conn)\n\n*Example* Merge can be used recursively to modify object within objects.\n\n r.expr({'weapons' : {'spectacular graviton beam' : {'dmg' : 10, 'cooldown' : 20}}}).merge(\n {'weapons' : {'spectacular graviton beam' : {'dmg' : 10}}}\n ).run(conn)\n\n*Example* To replace a nested object with another object you can use the literal keyword.\n\n r.expr({'weapons' : {'spectacular graviton beam' : {'dmg' : 10, 'cooldown' : 20}}}).merge(\n {'weapons' : r.literal({'repulsor rays' : {'dmg' : 3, 'cooldown' : 0}})}\n ).run(conn)\n\n*Example* Literal can be used to remove keys from an object as well.\n\n r.expr({'weapons' : {'spectacular graviton beam' : {'dmg' : 10, 'cooldown' : 20}}}).merge(\n {'weapons' : {'spectacular graviton beam' : r.literal()}}\n ).run(conn)\n\n",
),
(
rethinkdb.object,
b"r.object([key, value,]...) -> object\n\nCreates an object from a list of key-value pairs, where the keys must\nbe strings. `r.object(A, B, C, D)` is equivalent to\n`r.expr([[A, B], [C, D]]).coerce_to('OBJECT')`.\n\n*Example* Create a simple object.\n\n > r.object('id', 5, 'data', ['foo', 'bar']).run(conn)\n {'data': [\"foo\", \"bar\"], 'id': 5}\n",
),
(
rethinkdb.ast.RqlQuery.pluck,
b"sequence.pluck([selector1, selector2...]) -> stream\narray.pluck([selector1, selector2...]) -> array\nobject.pluck([selector1, selector2...]) -> object\nsingleSelection.pluck([selector1, selector2...]) -> object\n\nPlucks out one or more attributes from either an object or a sequence of objects\n(projection).\n\n*Example* We just need information about IronMan's reactor and not the rest of the\ndocument.\n\n r.table('marvel').get('IronMan').pluck('reactorState', 'reactorPower').run(conn)\n\n*Example* For the hero beauty contest we only care about certain qualities.\n\n r.table('marvel').pluck('beauty', 'muscleTone', 'charm').run(conn)\n\n*Example* Pluck can also be used on nested objects.\n\n r.table('marvel').pluck({'abilities' : {'damage' : True, 'mana_cost' : True}, 'weapons' : True}).run(conn)\n\n*Example* The nested syntax can quickly become overly verbose so there's a shorthand\nfor it.\n\n r.table('marvel').pluck({'abilities' : ['damage', 'mana_cost']}, 'weapons').run(conn)\n\nFor more information read the [nested field documentation](http://rethinkdb.com/docs/nested-fields/).\n",
),
(
rethinkdb.ast.RqlQuery.prepend,
b"array.prepend(value) -> array\n\nPrepend a value to an array.\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].prepend('newBoots').run(conn)\n",
),
(
rethinkdb.row,
b"r.row -> value\n\nReturns the currently visited document.\n\n*Example* Get all users whose age is greater than 5.\n\n r.table('users').filter(r.row['age'] > 5).run(conn)\n\n*Example* Access the attribute 'child' of an embedded document.\n\n r.table('users').filter(r.row['embedded_doc']['child'] > 5).run(conn)\n\n*Example* Add 1 to every element of an array.\n\n r.expr([1, 2, 3]).map(r.row + 1).run(conn)\n\n*Example* For nested queries, use functions instead of `row`.\n\n r.table('users').filter(\n lambda doc: doc['name'] == r.table('prizes').get('winner')\n ).run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.set_difference,
b"array.set_difference(array) -> array\n\nRemove the elements of one array from another and return them as a set (an array with\ndistinct values).\n\n*Example* Check which pieces of equipment Iron Man has, excluding a fixed list.\n\n r.table('marvel').get('IronMan')['equipment'].set_difference(['newBoots', 'arc_reactor']).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.set_insert,
b"array.set_insert(value) -> array\n\nAdd a value to an array and return it as a set (an array with distinct values).\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].set_insert('newBoots').run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.set_intersection,
b"array.set_intersection(array) -> array\n\nIntersect two arrays returning values that occur in both of them as a set (an array with\ndistinct values).\n\n*Example* Check which pieces of equipment Iron Man has from a fixed list.\n\n r.table('marvel').get('IronMan')['equipment'].set_intersection(['newBoots', 'arc_reactor']).run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.set_union,
b"array.set_union(array) -> array\n\nAdd a several values to an array and return it as a set (an array with distinct values).\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots and an arc reactor.\n\n r.table('marvel').get('IronMan')['equipment'].set_union(['newBoots', 'arc_reactor']).run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.splice_at,
b'array.splice_at(index, array) -> array\n\nInsert several values in to an array at a given index. Returns the modified array.\n\n*Example* Hulk and Thor decide to join the avengers.\n\n r.expr(["Iron Man", "Spider-Man"]).splice_at(1, ["Hulk", "Thor"]).run(conn)\n',
),
(
rethinkdb.ast.RqlQuery.values,
b'singleSelection.values() -> array\nobject.values() -> array\n\nReturn an array containing all of an object\'s values. `values()` guarantees the values will come out in the same order as [keys](http://rethinkdb.com/api/python/keys).\n\n*Example* Get all of the values from a table row.\n\n # row: { "id": 1, "mail": "fred@example.com", "name": "fred" }\n \n r.table(\'users\').get(1).values().run(conn)\n \n > [ 1, "fred@example.com", "fred" ]\n',
),
(
rethinkdb.ast.RqlQuery.without,
b"sequence.without([selector1, selector2...]) -> stream\narray.without([selector1, selector2...]) -> array\nsingleSelection.without([selector1, selector2...]) -> object\nobject.without([selector1, selector2...]) -> object\n\nThe opposite of pluck; takes an object or a sequence of objects, and returns them with\nthe specified paths removed.\n\n*Example* Since we don't need it for this computation we'll save bandwidth and leave\nout the list of IronMan's romantic conquests.\n\n r.table('marvel').get('IronMan').without('personalVictoriesList').run(conn)\n\n*Example* Without their prized weapons, our enemies will quickly be vanquished.\n\n r.table('enemies').without('weapons').run(conn)\n\n*Example* Nested objects can be used to remove the damage subfield from the weapons and abilities fields.\n\n r.table('marvel').without({'weapons' : {'damage' : True}, 'abilities' : {'damage' : True}}).run(conn)\n\n*Example* The nested syntax can quickly become overly verbose so there's a shorthand for it.\n\n r.table('marvel').without({'weapons' : 'damage', 'abilities' : 'damage'}).run(conn)\n\n",
),
(
rethinkdb.circle,
b"r.circle([longitude, latitude], radius[, num_vertices=32, geo_system='WGS84', unit='m', fill=True]) -> geometry\nr.circle(point, radius[, {num_vertices=32, geo_system='WGS84', unit='m', fill=True]) -> geometry\n\nConstruct a circular line or polygon. A circle in RethinkDB is a polygon or line *approximating* a circle of a given radius around a given center, consisting of a specified number of vertices (default 32).\n\nThe center may be specified either by two floating point numbers, the latitude (−90 to 90) and longitude (−180 to 180) of the point on a perfect sphere (see [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system), or by a point object. The radius is a floating point number whose units are meters by default, although that may be changed with the `unit` argument.\n\nOptional arguments available with `circle` are:\n\n* `num_vertices`: the number of vertices in the polygon or line. Defaults to 32.\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n* `unit`: Unit for the radius distance. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n* `fill`: if `True` (the default) the circle is filled, creating a polygon; if `False` the circle is unfilled (creating a line).\n\n*Example* Define a circle.\n\n r.table('geo').insert({\n 'id': 300,\n 'name': 'Hayes Valley',\n 'neighborhood': r.circle([-122.423246, 37.779388], 1000)\n }).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.distance,
b"geometry.distance(geometry[, geo_system='WGS84', unit='m']) -> number\nr.distance(geometry, geometry[, geo_system='WGS84', unit='m']) -> number\n\nCompute the distance between a point and another geometry object. At least one of the geometry objects specified must be a point.\n\nOptional arguments available with `distance` are:\n\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n* `unit`: Unit to return the distance in. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n\nIf one of the objects is a polygon or a line, the point will be projected onto the line or polygon assuming a perfect sphere model before the distance is computed (using the model specified with `geo_system`). As a consequence, if the polygon or line is extremely large compared to Earth's radius and the distance is being computed with the default WGS84 model, the results of `distance` should be considered approximate due to the deviation between the ellipsoid and spherical models.\n\n*Example* Compute the distance between two points on the Earth in kilometers.\n\n > point1 = r.point(-122.423246, 37.779388)\n > point2 = r.point(-117.220406, 32.719464)\n > r.distance(point1, point2, unit='km').run(conn)\n \n 734.1252496021841\n",
),
(
rethinkdb.ast.RqlQuery.fill,
b"line.fill() -> polygon\n\nConvert a Line object into a Polygon object. If the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\nIf the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them. You cannot directly construct a polygon with holes in it using `polygon`, but you can use [polygon_sub](http://rethinkdb.com/api/python/polygon_sub) to use a second polygon within the interior of the first to define a hole.\n\n*Example* Create a line object and then convert it to a polygon.\n\n r.table('geo').insert({\n 'id': 201,\n 'rectangle': r.line(\n [-122.423246, 37.779388],\n [-122.423246, 37.329898],\n [-121.886420, 37.329898],\n [-121.886420, 37.779388]\n )\n }).run(conn)\n \n r.table('geo').get(201).update({\n 'rectangle': r.row['rectangle'].fill()\n }, non_atomic=True).run(conn)\n",
),
(
rethinkdb.geojson,
b"r.geojson(geojson) -> geometry\n\nConvert a [GeoJSON](http://geojson.org) object to a ReQL geometry object.\n\nRethinkDB only allows conversion of GeoJSON objects which have ReQL equivalents: Point, LineString, and Polygon. MultiPoint, MultiLineString, and MultiPolygon are not supported. (You could, however, store multiple points, lines and polygons in an array and use a geospatial multi index with them.)\n\nOnly longitude/latitude coordinates are supported. GeoJSON objects that use Cartesian coordinates, specify an altitude, or specify their own coordinate reference system will be rejected.\n\n*Example* Convert a GeoJSON object to a ReQL geometry object.\n\n geo_json = {\n 'type': 'Point',\n 'coordinates': [ -122.423246, 37.779388 ]\n }\n r.table('geo').insert({\n 'id': 'sfo',\n 'name': 'San Francisco',\n 'location': r.geojson(geo_json)\n }).run(conn)\n",
),
(
rethinkdb.ast.Table.get_intersecting,
b"table.get_intersecting(geometry, index='indexname') -> selection<stream>\n\nGet all documents where the given geometry object intersects the geometry object of the requested geospatial index.\n\nThe `index` argument is mandatory. This command returns the same results as `table.filter(r.row('index').intersects(geometry))`. The total number of results is limited to the array size limit which defaults to 100,000, but can be changed with the `array_limit` option to [run](http://rethinkdb.com/api/python/run).\n\n*Example* Which of the locations in a list of parks intersect `circle1`?\n\n circle1 = r.circle([-117.220406, 32.719464], 10, unit='mi')\n r.table('parks').get_intersecting(circle1, index='area').run(conn)\n",
),
(
rethinkdb.ast.Table.get_nearest,
b"table.get_nearest(point, index='indexname'[, max_results=100, max_dist=100000, unit='m', geo_system='WGS84']) -> array\n\nGet all documents where the specified geospatial index is within a certain distance of the specified point (default 100 kilometers).\n\nThe `index` argument is mandatory. Optional arguments are:\n\n* `max_results`: the maximum number of results to return (default 100).\n* `unit`: Unit for the distance. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n* `max_dist`: the maximum distance from an object to the specified point (default 100 km).\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n\nThe return value will be an array of two-item objects with the keys `dist` and `doc`, set to the distance between the specified point and the document (in the units specified with `unit`, defaulting to meters) and the document itself, respectively.\n\n*Example* Return a list of enemy hideouts within 5000 meters of the secret base.\n\n secret_base = r.point(-122.422876, 37.777128)\n r.table('hideouts').get_nearest(secret_base, index='location',\n max_dist=5000).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.includes,
b"sequence.includes(geometry) -> sequence\ngeometry.includes(geometry) -> bool\n\nTests whether a geometry object is completely contained within another. When applied to a sequence of geometry objects, `includes` acts as a [filter](http://rethinkdb.com/api/python/filter), returning a sequence of objects from the sequence that include the argument.\n\n*Example* Is `point2` included within a 2000-meter circle around `point1`?\n\n > point1 = r.point(-117.220406, 32.719464)\n > point2 = r.point(-117.206201, 32.725186)\n > r.circle(point1, 2000).includes(point2).run(conn)\n \n True\n\n*Example* Which of the locations in a list of parks include `circle1`?\n\n circle1 = r.circle([-117.220406, 32.719464], 10, unit='mi')\n r.table('parks')['area'].includes(circle1).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.intersects,
b"sequence.intersects(geometry) -> sequence\ngeometry.intersects(geometry) -> bool\nr.intersects(sequence, geometry) -> sequence\nr.intersects(geometry, geometry) -> bool\n\nTests whether two geometry objects intersect with one another. When applied to a sequence of geometry objects, `intersects` acts as a [filter](http://rethinkdb.com/api/python/filter), returning a sequence of objects from the sequence that intersect with the argument.\n\n*Example* Is `point2` within a 2000-meter circle around `point1`?\n\n > point1 = r.point(-117.220406, 32.719464)\n > point2 = r.point(-117.206201, 32.725186)\n > r.circle(point1, 2000).intersects(point2).run(conn)\n \n True\n\n*Example* Which of the locations in a list of parks intersect `circle1`?\n\n circle1 = r.circle([-117.220406, 32.719464], 10, unit='mi')\n r.table('parks')('area').intersects(circle1).run(conn)\n",
),
(
rethinkdb.line,
b"r.line([lon1, lat1], [lon2, lat2], ...) -> line\nr.line(point1, point2, ...) -> line\n\nConstruct a geometry object of type Line. The line can be specified in one of two ways:\n\n* Two or more two-item arrays, specifying latitude and longitude numbers of the line's vertices;\n* Two or more [Point](http://rethinkdb.com/api/python/point) objects specifying the line's vertices.\n\n<!-- break -->\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\n*Example* Define a line.\n\n r.table('geo').insert({\n 'id': 101,\n 'route': r.line([-122.423246, 37.779388], [-121.886420, 37.329898])\n }).run(conn)\n\n*Example* Define a line using an array of points.\n\nYou can use the [args](http://rethinkdb.com/api/python/args) command to pass an array of Point objects (or latitude-longitude pairs) to `line`.\n\n var route = [\n [-122.423246, 37.779388],\n [-121.886420, 37.329898]\n ]\n r.table('geo').insert({\n 'id': 102,\n 'route': r.line(r.args(route))\n }).run(conn)\n",
),
(
rethinkdb.point,
b"r.point(longitude, latitude) -> point\n\nConstruct a geometry object of type Point. The point is specified by two floating point numbers, the longitude (−180 to 180) and latitude (−90 to 90) of the point on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\n*Example* Define a point.\n\n r.table('geo').insert({\n 'id': 1,\n 'name': 'San Francisco',\n 'location': r.point(-122.423246, 37.779388)\n }).run(conn)\n",
),
(
rethinkdb.polygon,
b"r.polygon([lon1, lat1], [lon2, lat2], [lon3, lat3], ...) -> polygon\nr.polygon(point1, point2, point3, ...) -> polygon\n\nConstruct a geometry object of type Polygon. The Polygon can be specified in one of two ways:\n\n* Three or more two-item arrays, specifying latitude and longitude numbers of the polygon's vertices;\n* Three or more [Point](http://rethinkdb.com/api/python/point) objects specifying the polygon's vertices.\n\n<!-- break -->\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\nIf the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them. You cannot directly construct a polygon with holes in it using `polygon`, but you can use [polygon_sub](http://rethinkdb.com/api/python/polygon_sub) to use a second polygon within the interior of the first to define a hole.\n\n*Example* Define a polygon.\n\n r.table('geo').insert({\n 'id': 101,\n 'rectangle': r.polygon(\n [-122.423246, 37.779388],\n [-122.423246, 37.329898],\n [-121.886420, 37.329898],\n [-121.886420, 37.779388]\n )\n }).run(conn)\n\n*Example* Define a polygon using an array of vertices.\n\nYou can use the [args](http://rethinkdb.com/api/python/args) command to pass an array of Point objects (or latitude-longitude pairs) to `polygon`.\n\n vertices = [\n [-122.423246, 37.779388],\n [-122.423246, 37.329898],\n [-121.886420, 37.329898],\n [-121.886420, 37.779388]\n ]\n r.table('geo').insert({\n 'id': 102,\n 'rectangle': r.polygon(r.args(vertices))\n }).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.polygon_sub,
b'polygon1.polygon_sub(polygon2) -> polygon\n\nUse `polygon2` to "punch out" a hole in `polygon1`. `polygon2` must be completely contained within `polygon1` and must have no holes itself (it must not be the output of `polygon_sub` itself).\n\n*Example* Define a polygon with a hole punched in it.\n\n outer_polygon = r.polygon(\n [-122.4, 37.7],\n [-122.4, 37.3],\n [-121.8, 37.3],\n [-121.8, 37.7]\n )\n inner_polygon = r.polygon(\n [-122.3, 37.4],\n [-122.3, 37.6],\n [-122.0, 37.6],\n [-122.0, 37.4]\n )\n outer_polygon.polygon_sub(inner_polygon).run(conn)\n',
),
(
rethinkdb.ast.RqlQuery.to_geojson,
b"geometry.to_geojson() -> object\n\nConvert a ReQL geometry object to a [GeoJSON](http://geojson.org) object.\n\n*Example* Convert a ReQL geometry object to a GeoJSON object.\n\n > r.table('geo').get('sfo')['location'].to_geojson().run(conn)\n \n {\n 'type': 'Point',\n 'coordinates': [ -122.423246, 37.779388 ]\n }\n",
),
(
rethinkdb.ast.RqlQuery.eq_join,
b'sequence.eq_join(left_field, right_table[, index=\'id\', ordered=False]) -> sequence\nsequence.eq_join(predicate_function, right_table[, index=\'id\', ordered=False]) -> sequence\n\nJoin tables using a field or function on the left-hand sequence matching primary keys or secondary indexes on the right-hand table. `eq_join` is more efficient than other ReQL join types, and operates much faster. Documents in the result set consist of pairs of left-hand and right-hand documents, matched when the field on the left-hand side exists and is non-null and an entry with that field\'s value exists in the specified index on the right-hand side.\n\nThe result set of `eq_join` is a stream or array of objects. Each object in the returned set will be an object of the form `{ left: <left-document>, right: <right-document> }`, where the values of `left` and `right` will be the joined documents. Use the <code><a href="/api/python/zip/">zip</a></code> command to merge the `left` and `right` fields together.\n\nThe results from `eq_join` are, by default, not ordered. The optional `ordered=True` parameter will cause `eq_join` to order the output based on the left side input stream. (If there are multiple matches on the right side for a document on the left side, their order is not guaranteed even if `ordered` is `True`.) Requiring ordered results can significantly slow down `eq_join`, and in many circumstances this ordering will not be required. 
(See the first example, in which ordered results are obtained by using `order_by` after `eq_join`.)\n\nSuppose the players table contains these documents:\n\n [\n { \'id\': 1, \'player\': \'George\', \'gameId\': 1 },\n { \'id\': 2, \'player\': \'Agatha\', \'gameId\': 3 },\n { \'id\': 3, \'player\': \'Fred\', \'gameId\': 2 },\n { \'id\': 4, \'player\': \'Marie\', \'gameId\': 2 },\n { \'id\': 5, \'player\': \'Earnest\', \'gameId\': 1 },\n { \'id\': 6, \'player\': \'Beth\', \'gameId\': 3 }\n ]\n\nThe games table contains these documents:\n\n [\n { \'id\': 1, \'field\': \'Little Delving\' },\n { \'id\': 2, \'field\': \'Rushock Bog\' },\n { \'id\': 3, \'field\': \'Bucklebury\' }\n ]\n\n**Example:** Match players with the games they\'ve played against one another.\n\nJoin these tables using `game_id` on the player table and `id` on the games table:\n\n r.table(\'players\').eq_join(\'game_id\', r.table(\'games\')).run(conn)\n\nThis will return a result set such as the following:\n\n [\n {\n "left" : { "gameId" : 3, "id" : 2, "player" : "Agatha" },\n "right" : { "id" : 3, "field" : "Bucklebury" }\n },\n {\n "left" : { "gameId" : 2, "id" : 3, "player" : "Fred" },\n "right" : { "id" : 2, "field" : "Rushock Bog" }\n },\n ...\n ]\n\n<!-- stop -->\n\nWhat you likely want is the result of using `zip` with that. 
For clarity, we\'ll use `without` to drop the `id` field from the games table (it conflicts with the `id` field for the players and it\'s redundant anyway), and we\'ll order it by the games.\n\n r.table(\'players\').eq_join(\'game_id\', r.table(\'games\')).without({\'right\': "id"}).zip().order_by(\'game_id\').run(conn)\n \n [\n { "field": "Little Delving", "gameId": 1, "id": 5, "player": "Earnest" },\n { "field": "Little Delving", "gameId": 1, "id": 1, "player": "George" },\n { "field": "Rushock Bog", "gameId": 2, "id": 3, "player": "Fred" },\n { "field": "Rushock Bog", "gameId": 2, "id": 4, "player": "Marie" },\n { "field": "Bucklebury", "gameId": 3, "id": 6, "player": "Beth" },\n { "field": "Bucklebury", "gameId": 3, "id": 2, "player": "Agatha" }\n ]\n\nFor more information, see [Table joins in RethinkDB](http://rethinkdb.com/docs/table-joins/).\n\n**Example:** Use a secondary index on the right table rather than the primary key. If players have a secondary index on their cities, we can get a list of arenas with players in the same area.\n\n r.table(\'arenas\').eq_join(\'city_id\', r.table(\'arenas\'), index=\'city_id\').run(conn)\n\n**Example:** Use a nested key as the join field. Suppose the documents in the players table were structured like this:\n\n { \'id\': 1, \'player\': \'George\', \'game\': {\'id\': 1} },\n { \'id\': 2, \'player\': \'Agatha\', \'game\': {\'id\': 3} },\n ...\n\nSimply specify the field using the `row` command instead of a string.\n\n r.table(\'players\').eq_join(r.row[\'game\'][\'id\'], r.table(\'games\')).without({\'right\': \'id\'}).zip().run(conn)\n \n [\n { "field": "Little Delving", "game": { "id": 1 }, "id": 5, "player": "Earnest" },\n { "field": "Little Delving", "game": { "id": 1 }, "id": 1, "player": "George" },\n ...\n ]\n\n**Example:** Use a function instead of a field to join on a more complicated expression. Suppose the players have lists of favorite games ranked in order in a field such as `"favorites": [3, 2, 1]`. 
Get a list of players and their top favorite:\n\n r.table(\'players3\').eq_join(\n lambda player: player[\'favorites\'].nth(0),\n r.table(\'games\')\n ).without([{\'left\': [\'favorites\', \'game_id\', \'id\']}, {\'right\': \'id\'}]).zip()\n\nResult:\n\n [\n \t{ "field": "Rushock Bog", "name": "Fred" },\n \t{ "field": "Little Delving", "name": "George" },\n \t...\n ]\n',
),
(
rethinkdb.ast.RqlQuery.inner_join,
b"sequence.inner_join(other_sequence, predicate_function) -> stream\narray.inner_join(other_sequence, predicate_function) -> array\n\nReturns an inner join of two sequences.\n\nThe returned sequence represents an intersection of the left-hand sequence and the right-hand sequence: each row of the left-hand sequence will be compared with each row of the right-hand sequence to find all pairs of rows which satisfy the predicate. Each matched pair of rows of both sequences are combined into a result row. In most cases, you will want to follow the join with [zip](http://rethinkdb.com/api/python/zip) to combine the left and right results.\n\n*Example* Return a list of all matchups between Marvel and DC heroes in which the DC hero could beat the Marvel hero in a fight.\n\n r.table('marvel').inner_join(r.table('dc'),\n lambda marvel_row, dc_row: marvel_row['strength'] < dc_row['strength']\n ).zip().run(conn)\n\n<!-- stop -->\n\n(Compare this to an [outer_join](http://rethinkdb.com/api/python/outer_join) with the same inputs and predicate, which would return a list of *all* Marvel heroes along with any DC heroes with a higher strength.)",
),
(
rethinkdb.ast.RqlQuery.outer_join,
b"sequence.outer_join(other_sequence, predicate_function) -> stream\narray.outer_join(other_sequence, predicate_function) -> array\n\nReturns a left outer join of two sequences. The returned sequence represents a union of the left-hand sequence and the right-hand sequence: all documents in the left-hand sequence will be returned, each matched with a document in the right-hand sequence if one satisfies the predicate condition. In most cases, you will want to follow the join with [zip](http://rethinkdb.com/api/python/zip) to combine the left and right results.\n\n*Example* Return a list of all Marvel heroes, paired with any DC heroes who could beat them in a fight.\n\n r.table('marvel').outer_join(r.table('dc'),\n lambda marvel_row, dc_row: marvel_row['strength'] < dc_row['strength']\n ).zip().run(conn)\n\n(Compare this to an [inner_join](http://rethinkdb.com/api/python/inner_join) with the same inputs and predicate, which would return a list only of the matchups in which the DC hero has the higher strength.)\n",
),
(
rethinkdb.ast.RqlQuery.zip,
b"stream.zip() -> stream\narray.zip() -> array\n\nUsed to 'zip' up the result of a join by merging the 'right' fields into 'left' fields of each member of the sequence.\n\n*Example* 'zips up' the sequence by merging the left and right fields produced by a join.\n\n r.table('marvel').eq_join('main_dc_collaborator', r.table('dc')).zip().run(conn)\n",
),
(
rethinkdb.db_create,
b'r.db_create(db_name) -> object\n\nCreate a database. A RethinkDB database is a collection of tables, similar to\nrelational databases.\n\nIf successful, the command returns an object with two fields:\n\n* `dbs_created`: always `1`.\n* `config_changes`: a list containing one object with two fields, `old_val` and `new_val`:\n * `old_val`: always `None`.\n * `new_val`: the database\'s new [config](http://rethinkdb.com/api/python/config) value.\n\nIf a database with the same name already exists, the command throws `ReqlRuntimeError`.\n\nNote: Only alphanumeric characters and underscores are valid for the database name.\n\n*Example* Create a database named \'superheroes\'.\n\n r.db_create(\'superheroes\').run(conn)\n \n {\n "config_changes": [\n {\n "new_val": {\n "id": "e4689cfc-e903-4532-a0e6-2d6797a43f07",\n "name": "superheroes"\n },\n "old_val": None\n }\n ],\n "dbs_created": 1\n }\n\n',
),
(
rethinkdb.db_drop,
b'r.db_drop(db_name) -> object\n\nDrop a database. The database, all its tables, and corresponding data will be deleted.\n\nIf successful, the command returns an object with two fields:\n\n* `dbs_dropped`: always `1`.\n* `tables_dropped`: the number of tables in the dropped database.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: the database\'s original [config](http://rethinkdb.com/api/python/config) value.\n * `new_val`: always `None`.\n\nIf the given database does not exist, the command throws `ReqlRuntimeError`.\n\n*Example* Drop a database named \'superheroes\'.\n\n r.db_drop(\'superheroes\').run(conn)\n \n {\n "config_changes": [\n {\n "old_val": {\n "id": "e4689cfc-e903-4532-a0e6-2d6797a43f07",\n "name": "superheroes"\n },\n "new_val": None\n }\n ],\n "tables_dropped": 3,\n "dbs_dropped": 1\n }\n\n',
),
(
rethinkdb.db_list,
b"r.db_list() -> array\n\nList all database names in the system. The result is a list of strings.\n\n*Example* List all databases.\n\n r.db_list().run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.changes,
b"stream.changes([options]) -> stream\nsingleSelection.changes([options]) -> stream\n\nTurn a query into a changefeed, an infinite stream of objects representing changes to the query's results as they occur. A changefeed may return changes to a table or an individual document (a \"point\" changefeed). Commands such as `filter` or `map` may be used before the `changes` command to transform or filter the output, and many commands that operate on sequences can be chained after `changes`.\n\nThere are four optional arguments to `changes`.\n\n* `squash`: Controls how change notifications are batched. Acceptable values are `True`, `False` and a numeric value:\n * `True`: When multiple changes to the same document occur before a batch of notifications is sent, the changes are \"squashed\" into one change. The client receives a notification that will bring it fully up to date with the server.\n * `False`: All changes will be sent to the client verbatim. This is the default.\n * `n`: A numeric value (floating point). Similar to `True`, but the server will wait `n` seconds to respond in order to squash as many changes together as possible, reducing network traffic. The first batch will always be returned immediately.\n* `changefeed_queue_size`: the number of changes the server will buffer between client reads before it starts dropping changes and generates an error (default: 100,000).\n* `include_initial`: if `True`, the changefeed stream will begin with the current contents of the table or selection being monitored. These initial results will have `new_val` fields, but no `old_val` fields. The initial results may be intermixed with actual changes, as long as an initial result for the changed document has already been given. 
If an initial result for a document has been sent and a change is made to that document that would move it to the unsent part of the result set (e.g., a changefeed monitors the top 100 posters, the first 50 have been sent, and poster 48 has become poster 52), an \"uninitial\" notification will be sent, with an `old_val` field but no `new_val` field.\n* `include_states`: if `True`, the changefeed stream will include special status documents consisting of the field `state` and a string indicating a change in the feed's state. These documents can occur at any point in the feed between the notification documents described below. If `include_states` is `False` (the default), the status documents will not be sent.\n* `include_offsets`: if `True`, a changefeed stream on an `order_by.limit` changefeed will include `old_offset` and `new_offset` fields in status documents that include `old_val` and `new_val`. This allows applications to maintain ordered lists of the stream's result set. If `old_offset` is set and not `None`, the element at `old_offset` is being deleted; if `new_offset` is set and not `None`, then `new_val` is being inserted at `new_offset`. Setting `include_offsets` to `True` on a changefeed that does not support it will raise an error.\n\nThere are currently two states:\n\n* `{\"state\": \"initializing\"}` indicates the following documents represent initial values on the feed rather than changes. This will be the first document of a feed that returns initial values.\n* `{\"state\": \"ready\"}` indicates the following documents represent changes. 
This will be the first document of a feed that does *not* return initial values; otherwise, it will indicate the initial values have all been sent.\n\nIf the table becomes unavailable, the changefeed will be disconnected, and a runtime exception will be thrown by the driver.\n\nChangefeed notifications take the form of a two-field object:\n\n {\n \"old_val\": <document before change>,\n \"new_val\": <document after change>\n }\n\nWhen a document is deleted, `new_val` will be `None`; when a document is inserted, `old_val` will be `None`.\n\nThe server will buffer up to 100,000 elements. If the buffer limit is hit, early changes will be discarded, and the client will receive an object of the form `{\"error\": \"Changefeed cache over array size limit, skipped X elements.\"}` where `X` is the number of elements skipped.\n\nCommands that operate on streams (such as [filter](http://rethinkdb.com/api/python/filter/) or [map](http://rethinkdb.com/api/python/map/)) can usually be chained after `changes`. 
However, since the stream produced by `changes` has no ending, commands that need to consume the entire stream before returning (such as [reduce](http://rethinkdb.com/api/python/reduce/) or [count](http://rethinkdb.com/api/python/count/)) cannot.\n\n*Example* Subscribe to the changes on a table.\n\nStart monitoring the changefeed in one client:\n\n for change in r.table('games').changes().run(conn):\n print change\n\nAs these queries are performed in a second client, the first client would receive and print the following objects:\n\n > r.table('games').insert({'id': 1}).run(conn)\n {'old_val': None, 'new_val': {'id': 1}}\n \n > r.table('games').get(1).update({'player1': 'Bob'}).run(conn)\n {'old_val': {'id': 1}, 'new_val': {'id': 1, 'player1': 'Bob'}}\n \n > r.table('games').get(1).replace({'id': 1, 'player1': 'Bob', 'player2': 'Alice'}).run(conn)\n {'old_val': {'id': 1, 'player1': 'Bob'},\n 'new_val': {'id': 1, 'player1': 'Bob', 'player2': 'Alice'}}\n \n > r.table('games').get(1).delete().run(conn)\n {'old_val': {'id': 1, 'player1': 'Bob', 'player2': 'Alice'}, 'new_val': None}\n \n > r.table_drop('games').run(conn)\n ReqlRuntimeError: Changefeed aborted (table unavailable)\n\n*Example* Return all the changes that increase a player's score.\n\n r.table('test').changes().filter(\n r.row['new_val']['score'] > r.row['old_val']['score']\n ).run(conn)\n\n*Example* Return all the changes to a specific player's score that increase it past 10.\n\n r.table('test').get(1).filter(r.row['score'].gt(10)).changes().run(conn)\n\n*Example* Return all the inserts on a table.\n\n r.table('test').changes().filter(r.row['old_val'].eq(None)).run(conn)\n\n*Example* Return all the changes to game 1, with state notifications and initial values.\n\n r.table('games').get(1).changes(include_initial=True, include_states=True).run(conn)\n \n # result returned on changefeed\n {\"state\": \"initializing\"}\n {\"new_val\": {\"id\": 1, \"score\": 12, \"arena\": \"Hobbiton Field\"}}\n {\"state\": 
\"ready\"}\n {\n \t\"old_val\": {\"id\": 1, \"score\": 12, \"arena\": \"Hobbiton Field\"},\n \t\"new_val\": {\"id\": 1, \"score\": 14, \"arena\": \"Hobbiton Field\"}\n }\n {\n \t\"old_val\": {\"id\": 1, \"score\": 14, \"arena\": \"Hobbiton Field\"},\n \t\"new_val\": {\"id\": 1, \"score\": 17, \"arena\": \"Hobbiton Field\", \"winner\": \"Frodo\"}\n }\n\n*Example* Return all the changes to the top 10 games. This assumes the presence of a `score` secondary index on the `games` table.\n\n r.table('games').order_by(index=r.desc('score')).limit(10).changes().run(conn)\n",
),
(
rethinkdb.ast.Table.index_create,
b"table.index_create(index_name[, index_function][, multi=False, geo=False]) -> object\n\nCreate a new secondary index on a table. Secondary indexes improve the speed of many read queries at the slight cost of increased storage space and decreased write performance. For more information about secondary indexes, read the article \"[Using secondary indexes in RethinkDB](http://rethinkdb.com/docs/secondary-indexes/).\"\n\nRethinkDB supports different types of secondary indexes:\n\n- *Simple indexes* based on the value of a single field.\n- *Compound indexes* based on multiple fields.\n- *Multi indexes* based on arrays of values.\n- *Geospatial indexes* based on indexes of geometry objects, created when the `geo` optional argument is true.\n- Indexes based on *arbitrary expressions*.\n\nThe `index_function` can be an anonymous function or a binary representation obtained from the `function` field of [index_status](http://rethinkdb.com/api/python/index_status).\n\nIf successful, `create_index` will return an object of the form `{\"created\": 1}`. If an index by that name already exists on the table, a `ReqlRuntimeError` will be thrown.\n\n*Example* Create a simple index based on the field `post_id`.\n\n r.table('comments').index_create('post_id').run(conn)\n*Example* Create a simple index based on the nested field `author > name`.\n\n r.table('comments').index_create('author_name', r.row[\"author\"][\"name\"]).run(conn)\n\n*Example* Create a geospatial index based on the field `location`.\n\n r.table('places').index_create('location', geo=True).run(conn)\n\nA geospatial index field should contain only geometry objects. 
It will work with geometry ReQL terms ([get_intersecting](http://rethinkdb.com/api/python/get_intersecting/) and [get_nearest](http://rethinkdb.com/api/python/get_nearest/)) as well as index-specific terms ([index_status](http://rethinkdb.com/api/python/index_status), [index_wait](http://rethinkdb.com/api/python/index_wait), [index_drop](http://rethinkdb.com/api/python/index_drop) and [index_list](http://rethinkdb.com/api/python/index_list)). Using terms that rely on non-geometric ordering such as [get_all](http://rethinkdb.com/api/python/get_all/), [order_by](http://rethinkdb.com/api/python/order_by/) and [between](http://rethinkdb.com/api/python/between/) will result in an error.\n\n*Example* Create a compound index based on the fields `post_id` and `date`.\n\n r.table('comments').index_create('post_and_date', [r.row[\"post_id\"], r.row[\"date\"]]).run(conn)\n\n*Example* Create a multi index based on the field `authors`.\n\n r.table('posts').index_create('authors', multi=True).run(conn)\n\n*Example* Create a geospatial multi index based on the field `towers`.\n\n r.table('networks').index_create('towers', geo=True, multi=True).run(conn)\n\n*Example* Create an index based on an arbitrary expression.\n\n r.table('posts').index_create('authors', lambda doc:\n r.branch(\n doc.has_fields(\"updated_at\"),\n doc[\"updated_at\"],\n doc[\"created_at\"]\n )\n ).run(conn)\n\n*Example* Create a new secondary index based on an existing one.\n\n index = r.table('posts').index_status('authors').nth(0)['function'].run(conn)\n r.table('new_posts').index_create('authors', index).run(conn)\n\n*Example* Rebuild an outdated secondary index on a table.\n\n old_index = r.table('posts').index_status('old_index').nth(0)['function'].run(conn)\n r.table('posts').index_create('new_index', old_index).run(conn)\n r.table('posts').index_wait('new_index').run(conn)\n r.table('posts').index_rename('new_index', 'old_index', overwrite=True).run(conn)\n",
),
(
rethinkdb.ast.Table.index_drop,
b"table.index_drop(index_name) -> object\n\nDelete a previously created secondary index of this table.\n\n*Example* Drop a secondary index named 'code_name'.\n\n r.table('dc').index_drop('code_name').run(conn)\n\n",
),
(
rethinkdb.ast.Table.index_list,
b"table.index_list() -> array\n\nList all the secondary indexes of this table.\n\n*Example* List the available secondary indexes for this table.\n\n r.table('marvel').index_list().run(conn)\n",
),
(
rethinkdb.ast.Table.index_rename,
b"table.index_rename(old_index_name, new_index_name[, overwrite=False]) -> object\n\nRename an existing secondary index on a table. If the optional argument `overwrite` is specified as `True`, a previously existing index with the new name will be deleted and the index will be renamed. If `overwrite` is `False` (the default) an error will be raised if the new index name already exists.\n\nThe return value on success will be an object of the format `{'renamed': 1}`, or `{'renamed': 0}` if the old and new names are the same.\n\nAn error will be raised if the old index name does not exist, if the new index name is already in use and `overwrite` is `False`, or if either the old or new index name are the same as the primary key field name.\n\n*Example* Rename an index on the comments table.\n\n r.table('comments').index_rename('post_id', 'message_id').run(conn)\n",
),
(
rethinkdb.ast.Table.index_status,
b'table.index_status([, index...]) -> array\n\nGet the status of the specified indexes on this table, or the status\nof all indexes on this table if no indexes are specified.\n\nThe result is an array where for each index, there will be an object like this one:\n\n {\n "index": <index_name>,\n "ready": True,\n "function": <binary>,\n "multi": <bool>,\n "outdated": <bool>\n }\n\nor this one:\n\n {\n "index": <index_name>,\n "ready": False,\n "progress": <float>,\n "function": <binary>,\n "multi": <bool>,\n "outdated": <bool>\n }\n\nThe `multi` field will be `true` or `false` depending on whether this index was created as a multi index (see [index_create](http://rethinkdb.com/api/python/index_create/) for details). The `outdated` field will be true if the index is outdated in the current version of RethinkDB and needs to be rebuilt. The `progress` field is a float between `0` and `1`, indicating how far along the server is in constructing indexes after the most recent change to the table that would affect them. (`0` indicates no such indexes have been constructed; `1` indicates all of them have.)\n\nThe `function` field is a binary object containing an opaque representation of the secondary index (including the `multi` argument if specified). It can be passed as the second argument to [index_create](http://rethinkdb.com/api/python/index_create/) to create a new index with the same function; see `index_create` for more information.\n\n*Example* Get the status of all the indexes on `test`:\n\n r.table(\'test\').index_status().run(conn)\n\n*Example* Get the status of the `timestamp` index:\n\n r.table(\'test\').index_status(\'timestamp\').run(conn)\n\n*Example* Save the binary representation of the index:\n\n func = r.table(\'test\').index_status(\'timestamp\').nth(0)[\'function\'].run(conn)\n',
),
(
rethinkdb.ast.Table.index_wait,
b'table.index_wait([, index...]) -> array\n\nWait for the specified indexes on this table to be ready, or for all\nindexes on this table to be ready if no indexes are specified.\n\nThe result is an array containing one object for each table index:\n\n {\n "index": <index_name>,\n "ready": True,\n "function": <binary>,\n "multi": <bool>,\n "geo": <bool>,\n "outdated": <bool>\n }\n\nSee the [index_status](http://rethinkdb.com/api/python/index_status) documentation for a description of the field values.\n\n*Example* Wait for all indexes on the table `test` to be ready:\n\n r.table(\'test\').index_wait().run(conn)\n\n*Example* Wait for the index `timestamp` to be ready:\n\n r.table(\'test\').index_wait(\'timestamp\').run(conn)\n',
),
(
rethinkdb.ast.DB.table_create,
b'db.table_create(table_name[, options]) -> object\nr.table_create(table_name[, options]) -> object\n\nCreate a table. A RethinkDB table is a collection of JSON documents.\n\nIf successful, the command returns an object with two fields:\n\n* `tables_created`: always `1`.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: always `None`.\n * `new_val`: the table\'s new [config](http://rethinkdb.com/api/python/config) value.\n\nIf a table with the same name already exists, the command throws `ReqlOpFailedError`.\n\nWhen creating a table you can specify the following options:\n\n* `primary_key`: the name of the primary key. The default primary key is `id`.\n* `durability`: if set to `soft`, writes will be acknowledged by the server immediately and flushed to disk in the background. The default is `hard`: acknowledgment of writes happens after data has been written to disk.\n* `shards`: the number of shards, an integer from 1-64. Defaults to `1`.\n* `replicas`: either an integer or a mapping object. Defaults to `1`.\n * If `replicas` is an integer, it specifies the number of replicas per shard. Specifying more replicas than there are servers will return an error.\n * If `replicas` is an object, it specifies key-value pairs of server tags and the number of replicas to assign to those servers: `{\'tag1\': 2, \'tag2\': 4, \'tag3\': 2, ...}`.\n* `primary_replica_tag`: the primary server specified by its server tag. Required if `replicas` is an object; the tag must be in the object. This must *not* be specified if `replicas` is an integer.\n\nThe data type](http://rethinkdb.com/docs/data-types/) of a primary key is usually a string (like a UUID) or a number, but it can also be a time, binary object, boolean or an array. Data types can be mixed in the primary key field, but all values must be unique. 
Using an array as a primary key creates a compound index; read the documentation on [compound secondary indexes for more information, as it applies to primary keys as well. Primary keys cannot be objects.\n\nTables will be available for writing when the command returns.\n\n*Example* Create a table named \'dc_universe\' with the default settings.\n\n r.db(\'heroes\').table_create(\'dc_universe\').run(conn)\n \n {\n "config_changes": [\n {\n "new_val": {\n "db": "test",\n "durability": "hard",\n "id": "20ea60d4-3b76-4817-8828-98a236df0297",\n "name": "dc_universe",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "rethinkdb_srv1",\n "replicas": [\n "rethinkdb_srv1",\n "rethinkdb_srv2"\n ]\n }\n ],\n "write_acks": "majority"\n },\n "old_val": None\n }\n ],\n "tables_created": 1\n }\n\n*Example* Create a table named \'dc_universe\' using the field \'name\' as primary key.\n\n r.db(\'test\').table_create(\'dc_universe\', primary_key=\'name\').run(conn)\n\n*Example* Create a table set up for two shards and three replicas per shard. This requires three available servers.\n\n r.db(\'test\').table_create(\'dc_universe\', shards=2, replicas=3).run(conn)\n\nRead [Sharding and replication](http://rethinkdb.com/docs/sharding-and-replication/) for a complete discussion of the subject, including advanced topics.\n',
),
(
rethinkdb.ast.DB.table_drop,
b'db.table_drop(table_name) -> object\n\nDrop a table. The table and all its data will be deleted.\n\nIf successful, the command returns an object with two fields:\n\n* `tables_dropped`: always `1`.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: the dropped table\'s [config](http://rethinkdb.com/api/python/config) value.\n * `new_val`: always `None`.\n\nIf the given table does not exist in the database, the command throws `ReqlRuntimeError`.\n\n*Example* Drop a table named \'dc_universe\'.\n\n r.db(\'test\').table_drop(\'dc_universe\').run(conn)\n \n {\n "config_changes": [\n {\n "old_val": {\n "db": "test",\n "durability": "hard",\n "id": "20ea60d4-3b76-4817-8828-98a236df0297",\n "name": "dc_universe",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "rethinkdb_srv1",\n "replicas": [\n "rethinkdb_srv1",\n "rethinkdb_srv2"\n ]\n }\n ],\n "write_acks": "majority"\n },\n "new_val": None\n }\n ],\n "tables_dropped": 1\n }\n',
),
(
rethinkdb.ast.DB.table_list,
b"db.table_list() -> array\n\nList all table names in a database. The result is a list of strings.\n\n*Example* List all tables of the 'test' database.\n\n r.db('test').table_list().run(conn)\n \n",
),
(
rethinkdb.ast.RqlQuery.__add__,
b'value + value -> value\ntime + number -> time\nvalue.add(value[, value, ...]) -> value\ntime.add(number[, number, ...]) -> time\n\nSum two or more numbers, or concatenate two or more strings or arrays.\n\nThe `add` command can be called in either prefix or infix form; both forms are equivalent. Note that ReQL will not perform type coercion. You cannot, for example, `add` a string and a number together.\n\n*Example* It\'s as easy as 2 + 2 = 4.\n\n > (r.expr(2) + 2).run(conn)\n \n 4\n\n*Example* Concatenate strings.\n\n > (r.expr("foo") + "bar" + "baz").run(conn)\n \n "foobarbaz"\n\n*Example* Concatenate arrays.\n\n > (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n \n ["foo", "bar", "buzz"]\n\n*Example* Create a date one year from now.\n\n (r.now() + 365*24*60*60).run(conn)\n\n*Example* Use [args](http://rethinkdb.com/api/python/args) with `add` to sum multiple values.\n\n > vals = [10, 20, 30]\n > r.add(r.args(vals)).run(conn)\n \n 60\n\n*Example* Concatenate an array of strings with `args`.\n\n > vals = [\'foo\', \'bar\', \'buzz\']\n > r.add(r.args(vals)).run(conn)\n \n "foobarbuzz"\n',
),
(
rethinkdb.add,
b'value + value -> value\ntime + number -> time\nvalue.add(value[, value, ...]) -> value\ntime.add(number[, number, ...]) -> time\n\nSum two or more numbers, or concatenate two or more strings or arrays.\n\nThe `add` command can be called in either prefix or infix form; both forms are equivalent. Note that ReQL will not perform type coercion. You cannot, for example, `add` a string and a number together.\n\n*Example* It\'s as easy as 2 + 2 = 4.\n\n > (r.expr(2) + 2).run(conn)\n \n 4\n\n*Example* Concatenate strings.\n\n > (r.expr("foo") + "bar" + "baz").run(conn)\n \n "foobarbaz"\n\n*Example* Concatenate arrays.\n\n > (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n \n ["foo", "bar", "buzz"]\n\n*Example* Create a date one year from now.\n\n (r.now() + 365*24*60*60).run(conn)\n\n*Example* Use [args](http://rethinkdb.com/api/python/args) with `add` to sum multiple values.\n\n > vals = [10, 20, 30]\n > r.add(r.args(vals)).run(conn)\n \n 60\n\n*Example* Concatenate an array of strings with `args`.\n\n > vals = [\'foo\', \'bar\', \'buzz\']\n > r.add(r.args(vals)).run(conn)\n \n "foobarbuzz"\n',
),
(
rethinkdb.ast.RqlQuery.__and__,
b'bool & bool -> bool\nbool.and_([bool, bool, ...]) -> bool\nr.and_([bool, bool, ...]) -> bool\n\nCompute the logical "and" of one or more values.\n\nThe `and_` command can be used as an infix operator after its first argument (`r.expr(True).and_(False)`) or given all of its arguments as parameters (`r.and_(True, False)`). The standard Python and operator, `&`, may also be used with ReQL.\n\nCalling `and_` with zero arguments will return `True`.\n\n*Example* Return whether both `a` and `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) & b).run(conn)\n \n False\n*Example* Return whether all of `x`, `y` and `z` evaluate to true.\n\n > x = True\n > y = True\n > z = True\n > r.and_(x, y, z).run(conn)\n \n True\n',
),
(
rethinkdb.and_,
b'bool & bool -> bool\nbool.and_([bool, bool, ...]) -> bool\nr.and_([bool, bool, ...]) -> bool\n\nCompute the logical "and" of one or more values.\n\nThe `and_` command can be used as an infix operator after its first argument (`r.expr(True).and_(False)`) or given all of its arguments as parameters (`r.and_(True, False)`). The standard Python and operator, `&`, may also be used with ReQL.\n\nCalling `and_` with zero arguments will return `True`.\n\n*Example* Return whether both `a` and `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) & b).run(conn)\n \n False\n*Example* Return whether all of `x`, `y` and `z` evaluate to true.\n\n > x = True\n > y = True\n > z = True\n > r.and_(x, y, z).run(conn)\n \n True\n',
),
(
rethinkdb.ast.RqlQuery.ceil,
b"r.ceil(number) -> number\nnumber.ceil() -> number\n\nRounds the given value up, returning the smallest integer value greater than or equal to the given value (the value's ceiling).\n\n*Example* Return the ceiling of 12.345.\n\n > r.ceil(12.345).run(conn)\n \n 13.0\n\nThe `ceil` command can also be chained after an expression.\n\n*Example* Return the ceiling of -12.345.\n\n > r.expr(-12.345).ceil().run(conn)\n \n -12.0\n\n*Example* Return Iron Man's weight, rounded up with `ceil`.\n\n r.table('superheroes').get('ironman')['weight'].ceil().run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__div__,
b"number / number -> number\nnumber.div(number[, number ...]) -> number\n\nDivide two numbers.\n\n*Example* It's as easy as 2 / 2 = 1.\n\n (r.expr(2) / 2).run(conn)\n",
),
(
rethinkdb.div,
b"number / number -> number\nnumber.div(number[, number ...]) -> number\n\nDivide two numbers.\n\n*Example* It's as easy as 2 / 2 = 1.\n\n (r.expr(2) / 2).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__eq__,
b"value.eq(value[, value, ...]) -> bool\nvalue == value -> bool\n\nTest if two or more values are equal.\n\n*Example* See if a user's `role` field is set to `administrator`. \n\n r.table('users').get(1)['role'].eq('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] == 'administrator').run(conn)\n\n*Example* See if three variables contain equal values.\n\n r.eq(a, b, c).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.eq,
b"value.eq(value[, value, ...]) -> bool\nvalue == value -> bool\n\nTest if two or more values are equal.\n\n*Example* See if a user's `role` field is set to `administrator`. \n\n r.table('users').get(1)['role'].eq('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] == 'administrator').run(conn)\n\n*Example* See if three variables contain equal values.\n\n r.eq(a, b, c).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.floor,
b"r.floor(number) -> number\nnumber.floor() -> number\n\nRounds the given value down, returning the largest integer value less than or equal to the given value (the value's floor).\n\n*Example* Return the floor of 12.345.\n\n > r.floor(12.345).run(conn)\n \n 12.0\n\nThe `floor` command can also be chained after an expression.\n\n*Example* Return the floor of -12.345.\n\n > r.expr(-12.345).floor().run(conn)\n \n -13.0\n\n*Example* Return Iron Man's weight, rounded down with `floor`.\n\n r.table('superheroes').get('ironman')['weight'].floor().run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__ge__,
b"value.ge(value[, value, ...]) -> bool\nvalue >= value -> bool\n\nCompare values, testing if the left-hand value is greater or equal to than the right-hand.\n\n*Example* Test if a player has scored 10 points or more.\n\n r.table('players').get(1)['score'].ge(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] >= 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest.\n\n a = 10\n b = 20\n c = 15\n r.ge(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.ge(a, b).and(r.ge(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.ge,
b"value.ge(value[, value, ...]) -> bool\nvalue >= value -> bool\n\nCompare values, testing if the left-hand value is greater or equal to than the right-hand.\n\n*Example* Test if a player has scored 10 points or more.\n\n r.table('players').get(1)['score'].ge(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] >= 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest.\n\n a = 10\n b = 20\n c = 15\n r.ge(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.ge(a, b).and(r.ge(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__gt__,
b"value.gt(value[, value, ...]) -> bool\nvalue > value -> bool\n\nCompare values, testing if the left-hand value is greater than the right-hand.\n\n*Example* Test if a player has scored more than 10 points.\n\n r.table('players').get(1)['score'].gt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] > 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest, with no values being equal to one another.\n\n a = 10\n b = 20\n c = 15\n r.gt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.gt(a, b).and(r.gt(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.gt,
b"value.gt(value[, value, ...]) -> bool\nvalue > value -> bool\n\nCompare values, testing if the left-hand value is greater than the right-hand.\n\n*Example* Test if a player has scored more than 10 points.\n\n r.table('players').get(1)['score'].gt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] > 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest, with no values being equal to one another.\n\n a = 10\n b = 20\n c = 15\n r.gt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.gt(a, b).and(r.gt(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__le__,
b"value.le(value[, value, ...]) -> bool\nvalue <= value -> bool\n\nCompare values, testing if the left-hand value is less than or equal to the right-hand.\n\n*Example* Test if a player has scored 10 points or less.\n\n r.table('players').get(1)['score'].le(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] <= 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest.\n\n a = 20\n b = 10\n c = 15\n r.le(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.le(a, b).and(r.le(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.le,
b"value.le(value[, value, ...]) -> bool\nvalue <= value -> bool\n\nCompare values, testing if the left-hand value is less than or equal to the right-hand.\n\n*Example* Test if a player has scored 10 points or less.\n\n r.table('players').get(1)['score'].le(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] <= 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest.\n\n a = 20\n b = 10\n c = 15\n r.le(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.le(a, b).and(r.le(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__lt__,
b"value.lt(value[, value, ...]) -> bool\nvalue < value -> bool\n\nCompare values, testing if the left-hand value is less than the right-hand.\n\n*Example* Test if a player has scored less than 10 points.\n\n r.table('players').get(1)['score'].lt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] < 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest, with no values being equal to one another.\n\n a = 20\n b = 10\n c = 15\n r.lt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.lt(a, b).and(r.lt(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.lt,
b"value.lt(value[, value, ...]) -> bool\nvalue < value -> bool\n\nCompare values, testing if the left-hand value is less than the right-hand.\n\n*Example* Test if a player has scored less than 10 points.\n\n r.table('players').get(1)['score'].lt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] < 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest, with no values being equal to one another.\n\n a = 20\n b = 10\n c = 15\n r.lt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.lt(a, b).and(r.lt(b, c)).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__mod__,
b"number % number -> number\n\nFind the remainder when dividing two numbers.\n\n*Example* It's as easy as 2 % 2 = 0.\n\n (r.expr(2) % 2).run(conn)\n\n`\n",
),
(
rethinkdb.mod,
b"number % number -> number\n\nFind the remainder when dividing two numbers.\n\n*Example* It's as easy as 2 % 2 = 0.\n\n (r.expr(2) % 2).run(conn)\n\n`\n",
),
(
rethinkdb.ast.RqlQuery.__mul__,
b'number * number -> number\narray * number -> array\nnumber.mul(number[, number, ...]) -> number\narray.mul(number[, number, ...]) -> array\n\nMultiply two numbers, or make a periodic array.\n\n*Example* It\'s as easy as 2 * 2 = 4.\n\n (r.expr(2) * 2).run(conn)\n\n*Example* Arrays can be multiplied by numbers as well.\n\n (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n',
),
(
rethinkdb.mul,
b'number * number -> number\narray * number -> array\nnumber.mul(number[, number, ...]) -> number\narray.mul(number[, number, ...]) -> array\n\nMultiply two numbers, or make a periodic array.\n\n*Example* It\'s as easy as 2 * 2 = 4.\n\n (r.expr(2) * 2).run(conn)\n\n*Example* Arrays can be multiplied by numbers as well.\n\n (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n',
),
(
rethinkdb.ast.RqlQuery.__ne__,
b"value.ne(value[, value, ...]) -> bool\nvalue != value -> bool\n\nTest if two or more values are not equal.\n\n*Example* See if a user's `role` field is not set to `administrator`. \n\n r.table('users').get(1)['role'].ne('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] != 'administrator').run(conn)\n\n*Example* See if three variables do not contain equal values.\n\n r.ne(a, b, c).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.ne,
b"value.ne(value[, value, ...]) -> bool\nvalue != value -> bool\n\nTest if two or more values are not equal.\n\n*Example* See if a user's `role` field is not set to `administrator`. \n\n r.table('users').get(1)['role'].ne('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] != 'administrator').run(conn)\n\n*Example* See if three variables do not contain equal values.\n\n r.ne(a, b, c).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__invert__,
b"bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a \"flag\" field.\n\n r.table('users').filter(\n lambda users: (~users.has_fields('flag'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table('users').filter(\n lambda users: r.not_(users.has_fields('flag'))\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.not_,
b"bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a \"flag\" field.\n\n r.table('users').filter(\n lambda users: (~users.has_fields('flag'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table('users').filter(\n lambda users: r.not_(users.has_fields('flag'))\n ).run(conn)\n",
),
(
rethinkdb.not_,
b"bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a \"flag\" field.\n\n r.table('users').filter(\n lambda users: (~users.has_fields('flag'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table('users').filter(\n lambda users: r.not_(users.has_fields('flag'))\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__or__,
b"bool | bool -> bool\nbool.or_([bool, bool, ...]) -> bool\nr.or_([bool, bool, ...]) -> bool\n\nCompute the logical \"or\" of one or more values.\n\nThe `or_` command can be used as an infix operator after its first argument (`r.expr(True).or_(False)`) or given all of its arguments as parameters (`r.or_(True, False)`). The standard Python or operator, `|`, may also be used with ReQL.\n\nCalling `or_` with zero arguments will return `False`.\n\n*Example* Return whether either `a` or `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) | b).run(conn)\n \n True\n\n*Example* Return whether any of `x`, `y` or `z` evaluate to true.\n\n > x = False\n > y = False\n > z = False\n > r.or_(x, y, z).run(conn)\n \n False\n\n__Note:__ When using `or` inside a `filter` predicate to test the values of fields that may not exist on the documents being tested, you should use the `default` command with those fields so they explicitly return `False`.\n\n r.table('posts').filter(lambda post:\n post['category'].default('foo').eq('article').or(\n post['genre'].default('foo').eq('mystery'))\n ).run(conn)\n",
),
(
rethinkdb.or_,
b"bool | bool -> bool\nbool.or_([bool, bool, ...]) -> bool\nr.or_([bool, bool, ...]) -> bool\n\nCompute the logical \"or\" of one or more values.\n\nThe `or_` command can be used as an infix operator after its first argument (`r.expr(True).or_(False)`) or given all of its arguments as parameters (`r.or_(True, False)`). The standard Python or operator, `|`, may also be used with ReQL.\n\nCalling `or_` with zero arguments will return `False`.\n\n*Example* Return whether either `a` or `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) | b).run(conn)\n \n True\n\n*Example* Return whether any of `x`, `y` or `z` evaluate to true.\n\n > x = False\n > y = False\n > z = False\n > r.or_(x, y, z).run(conn)\n \n False\n\n__Note:__ When using `or` inside a `filter` predicate to test the values of fields that may not exist on the documents being tested, you should use the `default` command with those fields so they explicitly return `False`.\n\n r.table('posts').filter(lambda post:\n post['category'].default('foo').eq('article').or(\n post['genre'].default('foo').eq('mystery'))\n ).run(conn)\n",
),
(
rethinkdb.random,
b"r.random() -> number\nr.random(number[, number], float=True) -> number\nr.random(integer[, integer]) -> integer\n\nGenerate a random number between given (or implied) bounds. `random` takes zero, one or two arguments.\n\n- With __zero__ arguments, the result will be a floating-point number in the range `[0,1)` (from 0 up to but not including 1).\n- With __one__ argument _x,_ the result will be in the range `[0,x)`, and will be integer unless `float=True` is given as an option. Specifying a floating point number without the `float` option will raise an error.\n- With __two__ arguments _x_ and _y,_ the result will be in the range `[x,y)`, and will be integer unless `float=True` is given as an option. If _x_ and _y_ are equal an error will occur, unless the floating-point option has been specified, in which case _x_ will be returned. Specifying a floating point number without the `float` option will raise an error.\n\nNote: The last argument given will always be the 'open' side of the range, but when\ngenerating a floating-point number, the 'open' side may be less than the 'closed' side.\n\n*Example* Generate a random number in the range `[0,1)`\n\n r.random().run(conn)\n\n*Example* Generate a random integer in the range `[0,100)`\n\n r.random(100).run(conn)\n r.random(0, 100).run(conn)\n\n*Example* Generate a random number in the range `(-2.24,1.59]`\n\n r.random(1.59, -2.24, float=True).run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.round,
b"r.round(number) -> number\nnumber.round() -> number\n\nRounds the given value to the nearest whole integer.\n\nFor example, values of 1.0 up to but not including 1.5 will return 1.0, similar to floor; values of 1.5 up to 2.0 will return 2.0, similar to ceil.\n\n*Example* Round 12.345 to the nearest integer.\n\n > r.round(12.345).run(conn)\n \n 12.0\n\nThe `round` command can also be chained after an expression.\n\n*Example* Round -12.345 to the nearest integer.\n\n > r.expr(-12.345).round().run(conn)\n \n -12.0\n\n*Example* Return Iron Man's weight, rounded to the nearest integer.\n\n r.table('superheroes').get('ironman')['weight'].round().run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.__sub__,
b"number - number -> number\ntime - number -> time\ntime - time -> number\nnumber.sub(number[, number, ...]) -> number\ntime.sub(number[, number, ...]) -> time\ntime.sub(time) -> number\n\nSubtract two numbers.\n\n*Example* It's as easy as 2 - 2 = 0.\n\n (r.expr(2) - 2).run(conn)\n\n*Example* Create a date one year ago today.\n\n r.now() - 365*24*60*60\n\n*Example* Retrieve how many seconds elapsed between today and `date`.\n\n r.now() - date\n\n",
),
(
rethinkdb.sub,
b"number - number -> number\ntime - number -> time\ntime - time -> number\nnumber.sub(number[, number, ...]) -> number\ntime.sub(number[, number, ...]) -> time\ntime.sub(time) -> number\n\nSubtract two numbers.\n\n*Example* It's as easy as 2 - 2 = 0.\n\n (r.expr(2) - 2).run(conn)\n\n*Example* Create a date one year ago today.\n\n r.now() - 365*24*60*60\n\n*Example* Retrieve how many seconds elapsed between today and `date`.\n\n r.now() - date\n\n",
),
(
rethinkdb.ast.Table.between,
b'table.between(lower_key, upper_key[, options]) -> table_slice\ntable_slice.between(lower_key, upper_key[, options]) -> table_slice\n\nGet all documents between two keys. Accepts three optional arguments: `index`, `left_bound`, and `right_bound`. If `index` is set to the name of a secondary index, `between` will return all documents where that index\'s value is in the specified range (it uses the primary key by default). `left_bound` or `right_bound` may be set to `open` or `closed` to indicate whether or not to include that endpoint of the range (by default, `left_bound` is closed and `right_bound` is open).\n\nYou may also use the special constants `r.minval` and `r.maxval` for boundaries, which represent "less than any index key" and "more than any index key" respectively. For instance, if you use `r.minval` as the lower key, then `between` will return all documents whose primary keys (or indexes) are less than the specified upper key.\n\nIf you use arrays as indexes (compound indexes), they will be sorted using lexicographical order. Take the following range as an example:\n\n\t[[1, "c"] ... 
[5, "e"]]\n\nThis range includes all compound keys:\n\n* whose first item is 1 and second item is equal or greater than "c";\n* whose first item is between 1 and 5, *regardless of the value of the second item*;\n* whose first item is 5 and second item is less than or equal to "e".\n\n*Example* Find all users with primary key >= 10 and < 20 (a normal half-open interval).\n\n r.table(\'marvel\').between(10, 20).run(conn)\n\n*Example* Find all users with primary key >= 10 and <= 20 (an interval closed on both sides).\n\n r.table(\'marvel\').between(10, 20, right_bound=\'closed\').run(conn)\n\n*Example* Find all users with primary key < 20.\n\n r.table(\'marvel\').between(r.minval, 20).run(conn)\n\n*Example* Find all users with primary key > 10.\n\n r.table(\'marvel\').between(10, r.maxval, left_bound=\'open\').run(conn)\n\n*Example* Between can be used on secondary indexes too. Just pass an optional index argument giving the secondary index to query.\n\n r.table(\'dc\').between(\'dark_knight\', \'man_of_steel\', index=\'code_name\').run(conn)\n\n*Example* Get all users whose full name is between "John Smith" and "Wade Welles."\n\n r.table("users").between(["Smith", "John"], ["Welles", "Wade"],\n index="full_name").run(conn)\n\n*Example* Get the top 10 ranked teams in order.\n\n r.table("teams").order_by(index="rank").between(1, 11).run(conn)\n\n__Note:__ When `between` is chained after [order_by](http://rethinkdb.com/api/python/order_by), both commands must use the same index; `between` will default to the index `order_by` is using, so in this example `"rank"` is automatically being used by `between`. Trying to specify another index will result in a `ReqlRuntimeError`.\n\n*Example* Subscribe to a [changefeed](http://rethinkdb.com/docs/changefeeds/python) of teams ranked in the top 10.\n\n changes = r.table("teams").between(1, 11, index="rank").changes().run(conn)\n\n',
),
(
rethinkdb.db,
b"r.db(db_name) -> db\n\nReference a database.\n\nThe `db` command is optional. If it is not present in a query, the query will run against the database specified in the `db` argument given to [run](http://rethinkdb.com/api/python/run) if one was specified. Otherwise, the query will run against the default database for the connection, specified in the `db` argument to [connect](http://rethinkdb.com/api/python/connect).\n\n*Example* Explicitly specify a database for a query.\n\n r.db('heroes').table('marvel').run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.filter,
b'selection.filter(predicate_function[, default=False]) -> selection\nstream.filter(predicate_function[, default=False]) -> stream\narray.filter(predicate_function[, default=False]) -> array\n\nReturn all the elements in a sequence for which the given predicate is true. The return value of `filter` will be the same as the input (sequence, stream, or array). Documents can be filtered in a variety of ways—ranges, nested values, boolean conditions, and the results of anonymous functions.\n\nBy default, `filter` will silently skip documents with missing fields: if the predicate tries to access a field that doesn\'t exist (for instance, the predicate `{\'age\': 30}` applied to a document with no `age` field), that document will not be returned in the result set, and no error will be generated. This behavior can be changed with the `default` optional argument.\n\n* If `default` is set to `True`, documents with missing fields will be returned rather than skipped.\n* If `default` is set to `r.error()`, an `ReqlRuntimeError` will be thrown when a document with a missing field is tested.\n* If `default` is set to `False` (the default), documents with missing fields will be skipped.\n\n*Example* Get all users who are 30 years old.\n\n r.table(\'users\').filter({\'age\': 30}).run(conn)\n\nThe predicate `{\'age\': 30}` selects documents in the `users` table with an `age` field whose value is `30`. 
Documents with an `age` field set to any other value *or* with no `age` field present are skipped.\n\n<!-- stop -->\n\nWhile the `{\'field\': value}` style of predicate is useful for exact matches, a more general way to write a predicate is to use the [row](http://rethinkdb.com/api/python/row) command with a comparison operator such as [eq](http://rethinkdb.com/api/python/eq) (`==`) or [gt](http://rethinkdb.com/api/python/gt) (`>`), or to use a lambda function that returns `True` or `False`.\n\n r.table(\'users\').filter(r.row["age"] == 30).run(conn)\n\nIn this case, the predicate `r.row["age"] == 30` returns `True` if the field `age` is equal to 30. You can write this predicate as a lambda function instead:\n\n r.table(\'users\').filter(lambda user:\n user["age"] == 30\n ).run(conn)\n\nPredicates to `filter` are evaluated on the server, and must use ReQL expressions. Some Python comparison operators are overloaded by the RethinkDB driver and will be translated to ReQL, such as `==`, `<`/`>` and `|`/`&` (note the single character form, rather than `||`/`&&`).\n\nAlso, predicates must evaluate document fields. 
They cannot evaluate [secondary indexes](http://rethinkdb.com/docs/secondary-indexes/).\n\n*Example* Get all users who are more than 18 years old.\n\n r.table("users").filter(r.row["age"] > 18).run(conn)\n\n*Example* Get all users who are less than 18 years old and more than 13 years old.\n\n r.table("users").filter((r.row["age"] < 18) & (r.row["age"] > 13)).run(conn)\n\n*Example* Get all users who are more than 18 years old or have their parental consent.\n\n r.table("users").filter(\n (r.row["age"] >= 18) | (r.row["hasParentalConsent"])).run(conn)\n\n*Example* Retrieve all users who subscribed between January 1st, 2012\n(included) and January 1st, 2013 (excluded).\n\n r.table("users").filter(\n lambda user: user["subscription_date"].during(\n r.time(2012, 1, 1, \'Z\'), r.time(2013, 1, 1, \'Z\'))\n ).run(conn)\n\n*Example* Retrieve all users who have a gmail account (whose field `email` ends with `@gmail.com`).\n\n r.table("users").filter(\n lambda user: user["email"].match("@gmail.com$")\n ).run(conn)\n\n*Example* Filter based on the presence of a value in an array.\n\nGiven this schema for the `users` table:\n\n {\n "name": <type \'str\'>\n "places_visited": [<type \'str\'>]\n }\n\nRetrieve all users whose field `places_visited` contains `France`.\n\n r.table("users").filter(lambda user:\n user["places_visited"].contains("France")\n ).run(conn)\n\n*Example* Filter based on nested fields.\n\nGiven this schema for the `users` table:\n\n {\n "id": <type \'str\'>\n "name": {\n "first": <type \'str\'>,\n "middle": <type \'str\'>,\n "last": <type \'str\'>\n }\n }\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), with any middle name.\n\n r.table("users").filter({\n "name": {\n "first": "William",\n "last": "Adama"\n }\n }).run(conn)\n\nIf you want an exact match for a field that is an object, you will have to use `r.literal`.\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), and who do not 
have a middle name.\n\n r.table("users").filter(r.literal({\n "name": {\n "first": "William",\n "last": "Adama"\n }\n })).run(conn)\n\nYou may rewrite these with lambda functions.\n\n r.table("users").filter(\n lambda user:\n (user["name"]["first"] == "William")\n & (user["name"]["last"] == "Adama")\n ).run(conn)\n\n r.table("users").filter(lambda user:\n user["name"] == {\n "first": "William",\n "last": "Adama"\n }\n ).run(conn)\n\nBy default, documents missing fields tested by the `filter` predicate are skipped. In the previous examples, users without an `age` field are not returned. By passing the optional `default` argument to `filter`, you can change this behavior.\n\n*Example* Get all users less than 18 years old or whose `age` field is missing.\n\n r.table("users").filter(r.row["age"] < 18, default=True).run(conn)\n\n*Example* Get all users more than 18 years old. Throw an error if a\ndocument is missing the field `age`.\n\n r.table("users").filter(r.row["age"] > 18, default=r.error()).run(conn)\n\n*Example* Get all users who have given their phone number (all the documents whose field `phone_number` exists and is not `None`).\n\n r.table(\'users\').filter(\n lambda user: user.has_fields(\'phone_number\')\n ).run(conn)\n\n*Example* Get all users with an "editor" role or an "admin" privilege.\n\n r.table(\'users\').filter(\n lambda user: (user[\'role\'] == \'editor\').default(False) |\n (user[\'privilege\'] == \'admin\').default(False)\n ).run(conn)\n\nInstead of using the `default` optional argument to `filter`, we have to use default values on the fields within the `or` clause. Why? If the field on the left side of the `or` clause is missing from a document—in this case, if the user doesn\'t have a `role` field—the predicate will generate an error, and will return `False` (or the value the `default` argument is set to) without evaluating the right side of the `or`. 
By using `.default(False)` on the fields, each side of the `or` will evaluate to either the field\'s value or `False` if the field doesn\'t exist.\n',
),
(
rethinkdb.ast.Table.get,
b"table.get(key) -> singleRowSelection\n\nGet a document by primary key.\n\nIf no document exists with that primary key, `get` will return `None`.\n\n*Example* Find a document by UUID.\n\n r.table('posts').get('a9849eef-7176-4411-935b-79a6e3c56a74').run(conn)\n\n*Example* Find a document and merge another document with it.\n\n r.table('heroes').get(3).merge(\n { 'powers': ['invisibility', 'speed'] }\n ).run(conn)\n\n_*Example* Subscribe to a document's [changefeed](http://rethinkdb.com/docs/changefeeds/python).\n\n changes = r.table('heroes').get(3).changes().run(conn)\n",
),
(
rethinkdb.ast.Table.get_all,
b"table.get_all([key1, key2...], [, index='id']) -> selection\n\nGet all documents where the given value matches the value of the requested index.\n\n*Example* Secondary index keys are not guaranteed to be unique so we cannot query via [get](http://rethinkdb.com/api/python/get/) when using a secondary index.\n\n r.table('marvel').get_all('man_of_steel', index='code_name').run(conn)\n\n*Example* Without an index argument, we default to the primary index. While `get` will either return the document or `None` when no document with such a primary key value exists, this will return either a one or zero length stream.\n\n r.table('dc').get_all('superman').run(conn)\n\n*Example* You can get multiple documents in a single call to `get_all`.\n\n r.table('dc').get_all('superman', 'ant man').run(conn)\n\n*Example* You can use [args](http://rethinkdb.com/api/python/args/) with `get_all` to retrieve multiple documents whose keys are in a list. This uses `get_all` to get a list of female superheroes, coerces that to an array, and then gets a list of villains who have those superheroes as enemies.\n\n r.do(\n r.table('heroes').get_all('f', index='gender')['id'].coerce_to('array'), \n lamdba heroines: r.table('villains').get_all(r.args(heroines))\n ).run(conn)\n\nCalling `get_all` with zero arguments—which could happen in this example if the `heroines` list had no elements—will return nothing, i.e., a zero length stream.\n\nSecondary indexes can be used in extremely powerful ways with `get_all` and other commands; read the full article on [secondary indexes](http://rethinkdb.com/docs/secondary-indexes) for examples using boolean operations, `contains` and more.\n",
),
(
rethinkdb.ast.DB.table,
b"db.table(name[, read_mode='single', identifier_format='name']) -> table\n\nReturn all documents in a table. Other commands may be chained after `table` to return a subset of documents (such as [get](http://rethinkdb.com/api/python/get/) and [filter](http://rethinkdb.com/api/python/filter/)) or perform further processing.\n\n*Example* Return all documents in the table 'marvel' of the default database.\n\n r.table('marvel').run(conn)\n\n*Example* Return all documents in the table 'marvel' of the database 'heroes'.\n\n r.db('heroes').table('marvel').run(conn)\n\nThere are two optional arguments.\n\n* `read_mode`: One of three possible values affecting the consistency guarantee for the table read:\n * `single` returns values that are in memory (but not necessarily written to disk) on the primary replica. This is the default.\n * `majority` will only return values that are safely committed on disk on a majority of replicas. This requires sending a message to every replica on each read, so it is the slowest but most consistent.\n * `outdated` will return values that are in memory on an arbitrarily-selected replica. This is the fastest but least consistent.\n* `identifier_format`: possible values are `name` and `uuid`, with a default of `name`. If set to `uuid`, then [system tables](http://rethinkdb.com/docs/system-tables/) will refer to servers, databases and tables by UUID rather than name. (This only has an effect when used with system tables.)\n\n*Example* Allow potentially out-of-date data in exchange for faster reads.\n\n r.db('heroes').table('marvel', read_mode='outdated').run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.downcase,
b'string.downcase() -> string\n\nLowercases a string.\n\n*Example*\n\n > r.expr("Sentence about LaTeX.").downcase().run(conn)\n "sentence about latex."\n\n__Note:__ `upcase` and `downcase` only affect ASCII characters.\n',
),
(
rethinkdb.ast.RqlQuery.match,
b'string.match(regexp) -> None/object\n\nMatches against a regular expression. If there is a match, returns an object with the fields:\n\n- `str`: The matched string\n- `start`: The matched string\'s start\n- `end`: The matched string\'s end\n- `groups`: The capture groups defined with parentheses\n\nIf no match is found, returns `None`.\n\n<!-- break -->\n\nAccepts RE2 syntax\n([https://code.google.com/p/re2/wiki/Syntax](https://code.google.com/p/re2/wiki/Syntax)).\nYou can enable case-insensitive matching by prefixing the regular expression with\n`(?i)`. See the linked RE2 documentation for more flags.\n\nThe `match` command does not support backreferences.\n\n*Example* Get all users whose name starts with "A". Because `None` evaluates to `false` in\n[filter](http://rethinkdb.com/api/python/filter/), you can just use the result of `match` for the predicate.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("^A")\n ).run(conn)\n\n*Example* Get all users whose name ends with "n".\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("n$")\n ).run(conn)\n\n*Example* Get all users whose name has "li" in it\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("li")\n ).run(conn)\n\n*Example* Get all users whose name is "John" with a case-insensitive search.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("(?i)^john$")\n ).run(conn)\n\n*Example* Get all users whose name is composed of only characters between "a" and "z".\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("(?i)^[a-z]+$")\n ).run(conn)\n\n*Example* Get all users where the zipcode is a string of 5 digits.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'zipcode\'].match("\\d{5}")\n ).run(conn)\n\n*Example* Retrieve the domain of a basic email\n\n r.expr("name@domain.com").match(".*@(.*)").run(conn)\n\nResult:\n\n {\n "start": 0,\n "end": 20,\n "str": "name@domain.com",\n "groups":[\n {\n "end": 17,\n "start": 7,\n "str": "domain.com"\n }\n 
]\n }\n\nYou can then retrieve only the domain with the [\\[\\]](http://rethinkdb.com/api/python/get_field) selector.\n\n r.expr("name@domain.com").match(".*@(.*)")["groups"][0]["str"].run(conn)\n\nReturns `\'domain.com\'`\n\n*Example* Fail to parse out the domain and returns `None`.\n\n r.expr("name[at]domain.com").match(".*@(.*)").run(conn)\n',
),
(
rethinkdb.ast.RqlQuery.split,
b'string.split([separator, [max_splits]]) -> array\n\nSplits a string into substrings. Splits on whitespace when called\nwith no arguments. When called with a separator, splits on that\nseparator. When called with a separator and a maximum number of\nsplits, splits on that separator at most `max_splits` times. (Can be\ncalled with `None` as the separator if you want to split on whitespace\nwhile still specifying `max_splits`.)\n\nMimics the behavior of Python\'s `string.split` in edge cases, except\nfor splitting on the empty string, which instead produces an array of\nsingle-character strings.\n\n*Example* Split on whitespace.\n\n > r.expr("foo bar bax").split().run(conn)\n ["foo", "bar", "bax"]\n\n*Example* Split the entries in a CSV file.\n\n > r.expr("12,37,,22,").split(",").run(conn)\n ["12", "37", "", "22", ""]\n\n*Example* Split a string into characters.\n\n > r.expr("mlucy").split("").run(conn)\n ["m", "l", "u", "c", "y"]\n\n*Example* Split the entries in a CSV file, but only at most 3\ntimes.\n\n > r.expr("12,37,,22,").split(",", 3).run(conn)\n ["12", "37", "", "22,"]\n\n*Example* Split on whitespace at most once (i.e. get the first word).\n\n > r.expr("foo bar bax").split(None, 1).run(conn)\n ["foo", "bar bax"]\n',
),
(
rethinkdb.ast.RqlQuery.upcase,
b'string.upcase() -> string\n\nUppercases a string.\n\n*Example*\n\n > r.expr("Sentence about LaTeX.").upcase().run(conn)\n "SENTENCE ABOUT LATEX."\n\n__Note:__ `upcase` and `downcase` only affect ASCII characters.\n',
),
(
rethinkdb.ast.RqlQuery.concat_map,
b"stream.concat_map(function) -> stream\narray.concat_map(function) -> array\n\nConcatenate one or more elements into a single sequence using a mapping function.\n\n`concat_map` works in a similar fashion to [map](http://rethinkdb.com/api/python/map/), applying the given function to each element in a sequence, but it will always return a single sequence. If the mapping function returns a sequence, `map` would produce a sequence of sequences:\n\n r.expr([1, 2, 3]).map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n [[1, 2], [2, 4], [3, 6]]\n\nWhereas `concat_map` with the same mapping function would merge those sequences into one:\n\n r.expr([1, 2, 3]).concat_map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n [1, 2, 2, 4, 3, 6]\n\nThe return value, array or stream, will be the same type as the input.\n\n*Example* Construct a sequence of all monsters defeated by Marvel heroes. The field \"defeatedMonsters\" is an array of one or more monster names.\n\n r.table('marvel').concat_map(lambda hero: hero['defeatedMonsters']).run(conn)\n\n*Example* Simulate an [eq_join](http://rethinkdb.com/api/python/eq_join/) using `concat_map`. (This is how ReQL joins are implemented internally.)\n\n r.table('posts').concat_map(\n lambda post: r.table('comments').get_all(\n post['id'], index='post_id'\n ).map(\n lambda comment: { 'left': post, 'right': comment}\n )\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.is_empty,
b"sequence.is_empty() -> bool\n\nTest if a sequence is empty.\n\n*Example* Are there any documents in the marvel table?\n\n r.table('marvel').is_empty().run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.limit,
b"sequence.limit(n) -> stream\narray.limit(n) -> array\n\nEnd the sequence after the given number of elements.\n\n*Example* Only so many can fit in our Pantheon of heroes.\n\n r.table('marvel').order_by('belovedness').limit(10).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.map,
b"sequence1.map([sequence2, ...], function) -> stream\narray1.map([array2, ...], function) -> array\nr.map(sequence1[, sequence2, ...], function) -> stream\nr.map(array1[, array2, ...], function) -> array\n\nTransform each element of one or more sequences by applying a mapping function to them. If `map` is run with two or more sequences, it will iterate for as many items as there are in the shortest sequence.\n\nNote that `map` can only be applied to sequences, not single values. If you wish to apply a function to a single value/selection (including an array), use the [do](http://rethinkdb.com/api/python/do) command.\n\n*Example* Return the first five squares.\n\n > r.expr([1, 2, 3, 4, 5]).map(lambda val: (val * val)).run(conn)\n \n [1, 4, 9, 16, 25]\n\n*Example* Sum the elements of three sequences.\n\n > sequence1 = [100, 200, 300, 400]\n > sequence2 = [10, 20, 30, 40]\n > sequence3 = [1, 2, 3, 4]\n > r.map(sequence1, sequence2, sequence3,\n lambda val1, val2, val3: (val1 + val2 + val3)).run(conn)\n \n [111, 222, 333, 444]\n\n*Example* Rename a field when retrieving documents using `map` and [merge](http://rethinkdb.com/api/python/merge/).\n\nThis example renames the field `id` to `user_id` when retrieving documents from the table `users`.\n\n r.table('users').map(\n lambda doc: doc.merge({'user_id': doc['id']}).without('id')).run(conn)\n\nNote that in this case, [row](http://rethinkdb.com/api/python/row) may be used as an alternative to writing an anonymous function, as it returns the same value as the function parameter receives:\n\n r.table('users').map(\n r.row.merge({'user_id': r.row['id']}).without('id')).run(conn)\n\n*Example* Assign every superhero an archenemy.\n\n r.table('heroes').map(r.table('villains'),\n lambda hero, villain: hero.merge({'villain': villain})).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.nth,
b"sequence.nth(index) -> object\nselection.nth(index) -> selection<object>\n\nGet the *nth* element of a sequence, counting from zero. If the argument is negative, count from the last element.\n\n*Example* Select the second element in the array.\n\n r.expr([1,2,3]).nth(1).run(conn)\n r.expr([1,2,3])[1].run(conn)\n\n*Example* Select the bronze medalist from the competitors.\n\n r.table('players').order_by(index=r.desc('score')).nth(3).run(conn)\n\n*Example* Select the last place competitor.\n\n r.table('players').order_by(index=r.desc('score')).nth(-1).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.offsets_of,
b"sequence.offsets_of(datum | predicate_function) -> array\n\nGet the indexes of an element in a sequence. If the argument is a predicate, get the indexes of all elements matching it.\n\n*Example* Find the position of the letter 'c'.\n\n r.expr(['a','b','c']).offsets_of('c').run(conn)\n\n*Example* Find the popularity ranking of invisible heroes.\n\n r.table('marvel').union(r.table('dc')).order_by('popularity').offsets_of(\n r.row['superpowers'].contains('invisibility')\n ).run(conn)\n\n",
),
(
rethinkdb.ast.Table.order_by,
b"table.order_by([key | function], index=index_name) -> table_slice\nselection.order_by(key | function[, ...]) -> selection<array>\nsequence.order_by(key | function[, ...]) -> array\n\nSort the sequence by document values of the given key(s). To specify\nthe ordering, wrap the attribute with either `r.asc` or `r.desc`\n(defaults to ascending).\n\n__Note:__ RethinkDB uses byte-wise ordering for `orderBy` and does not support Unicode collations; non-ASCII characters will be sorted by UTF-8 codepoint. For more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nSorting without an index requires the server to hold the sequence in\nmemory, and is limited to 100,000 documents (or the setting of the `arrayLimit` option for [run](http://rethinkdb.com/api/python/run)). Sorting with an index can\nbe done on arbitrarily large tables, or after a [between](http://rethinkdb.com/api/python/between/) command\nusing the same index. This applies to both secondary indexes and the primary key (e.g., `index='id'`).\n\n*Example* Order all the posts using the index `date`. 
\n\n r.table('posts').order_by(index='date').run(conn)\n\n<!-- stop -->\n\nThe index must either be the primary key or have been previously created with [index_create](http://rethinkdb.com/api/python/index_create/).\n\n r.table('posts').index_create('date').run(conn)\n\nYou can also select a descending ordering:\n\n r.table('posts').order_by(index=r.desc('date')).run(conn, callback)\n\n*Example* Order a sequence without an index.\n\n r.table('posts').get(1)['comments'].order_by('date')\n\nYou can also select a descending ordering:\n\n r.table('posts').get(1)['comments'].order_by(r.desc('date'))\n\nIf you're doing ad-hoc analysis and know your table won't have more then 100,000\nelements (or you've changed the setting of the `arrayLimit` option for [run](http://rethinkdb.com/api/python/run)) you can run `order_by` without an index:\n\n r.table('small_table').order_by('date')\n\n*Example* You can efficiently order using multiple fields by using a\n[compound index](http://www.rethinkdb.com/docs/secondary-indexes/python/).\n\nOrder by date and title.\n\n r.table('posts').order_by(index='date_and_title').run(conn)\n\nThe index must either be the primary key or have been previously created with [index_create](http://rethinkdb.com/api/python/index_create/).\n\n r.table('posts').index_create('date_and_title', lambda post:\n [post[\"date\"], post[\"title\"]]).run(conn)\n\n_Note_: You cannot specify multiple orders in a compound index. See [issue #2306](https://github.com/rethinkdb/rethinkdb/issues/2306)\nto track progress.\n\n*Example* If you have a sequence with fewer documents than the `array_limit`, you can order it\nby multiple fields without an index.\n\n r.table('small_table').order_by('date', r.desc('title'))\n\n*Example* Notice that an index ordering always has highest\nprecedence. 
The following query orders posts by date, and if multiple\nposts were published on the same date, they will be ordered by title.\n\n r.table('post').order_by('title', index='date').run(conn)\n*Example* You can use [nested field](http://rethinkdb.com/docs/cookbook/python/#filtering-based-on-nested-fields) syntax to sort on fields from subdocuments. (You can also create indexes on nested fields using this syntax with `index_create`.)\n\n r.table('user').order_by(lambda user: user['group']['id']).run(conn)\n\n*Example* You can efficiently order data on arbitrary expressions using indexes.\n\n r.table('posts').order_by(index='votes').run(conn)\n\nThe index must have been previously created with [index_create](http://rethinkdb.com/api/ruby/index_create/).\n\n r.table('posts').index_create('votes', lambda post:\n post[\"upvotes\"]-post[\"downvotes\"]\n ).run(conn)\n\n*Example* If you have a sequence with fewer documents than the `array_limit`, you can order it with an arbitrary function directly.\n\n r.table('small_table').order_by(lambda doc:\n doc['upvotes']-doc['downvotes']\n );\n\nYou can also select a descending ordering:\n\n r.table('small_table').order_by(r.desc(lambda doc:\n doc['upvotes']-doc['downvotes']\n ));\n\n*Example* Ordering after a `between` command can be done as long as the same index is being used.\n\n r.table(\"posts\").between(r.time(2013, 1, 1, '+00:00'), r.time(2013, 1, 1, '+00:00'), index='date')\n .order_by(index='date').run(conn);\n\n",
),
(
rethinkdb.ast.RqlQuery.sample,
b"sequence.sample(number) -> selection\nstream.sample(number) -> array\narray.sample(number) -> array\n\nSelect a given number of elements from a sequence with uniform random distribution. Selection is done without replacement.\n\nIf the sequence has less than the requested number of elements (i.e., calling `sample(10)` on a sequence with only five elements), `sample` will return the entire sequence in a random order.\n\n*Example* Select 3 random heroes.\n\n r.table('marvel').sample(3).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.skip,
b"sequence.skip(n) -> stream\narray.skip(n) -> array\n\nSkip a number of elements from the head of the sequence.\n\n*Example* Here in conjunction with [order_by](http://rethinkdb.com/api/python/order_by/) we choose to ignore the most successful heroes.\n\n r.table('marvel').order_by('successMetric').skip(10).run(conn)\n\n",
),
(
rethinkdb.ast.RqlQuery.slice,
b"selection.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> selection\nstream.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> stream\narray.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> array\nbinary.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> binary\nstring.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> string\n\nReturn the elements of a sequence within the specified range.\n\n`slice` returns the range between `start_index` and `end_index`. If only `start_index` is specified, `slice` returns the range from that index to the end of the sequence. Specify `left_bound` or `right_bound` as `open` or `closed` to indicate whether to include that endpoint of the range by default: `closed` returns that endpoint, while `open` does not. By default, `left_bound` is closed and `right_bound` is open, so the range `(10,13)` will return the tenth, eleventh and twelfth elements in the sequence.\n\nIf `end_index` is past the end of the sequence, all elements from `start_index` to the end of the sequence will be returned. If `start_index` is past the end of the sequence or `end_index` is less than `start_index`, a zero-element sequence will be returned.\n\nNegative `start_index` and `end_index` values are allowed with arrays; in that case, the returned range counts back from the array's end. That is, the range `(-2)` returns the last two elements, and the range of `(2,-1)` returns the second element through the next-to-last element of the range. An error will be raised on a negative `start_index` or `end_index` with non-arrays. (An `end_index` of −1 *is* allowed with a stream if `right_bound` is closed; this behaves as if no `end_index` was specified.)\n\nIf `slice` is used with a [binary](http://rethinkdb.com/api/python/binary) object, the indexes refer to byte positions within the object. 
That is, the range `(10,20)` will refer to the 10th byte through the 19th byte.\n\nWith a string, `slice` behaves similarly, with the indexes referring to Unicode codepoints. String indexes start at `0`. (Note that combining codepoints are counted separately.)\n\nIf you are only specifying the indexes and not the bounding options, you may use Python's slice operator as a shorthand: `[start_index:end_index]`.\n\n*Example* Return the fourth, fifth and sixth youngest players. (The youngest player is at index 0, so those are elements 3–5.)\n\n r.table('players').order_by(index='age').slice(3,6).run(conn)\n\nOr, using Python's slice operator:\n\n r.table('players').filter({'class': 'amateur'})[10:20].run(conn)\n\n*Example* Return all but the top three players who have a red flag.\n\n r.table('players').filter({'flag': 'red'}).order_by(index=r.desc('score')).slice(3).run(conn)\n\n*Example* Return holders of tickets `X` through `Y`, assuming tickets are numbered sequentially. We want to include ticket `Y`.\n\n r.table('users').order_by(index='ticket').slice(x, y, right_bound='closed').run(conn)\n\n*Example* Return the elements of an array from the second through two from the end (that is, not including the last two).\n\n r.expr([0,1,2,3,4,5]).slice(2,-2).run(conn)\n [2,3]\n\n*Example* Return the third through fifth characters of a string.\n\n > r.expr(\"rutabaga\").slice(2,5).run(conn)\n \"tab\"\n",
),
(
rethinkdb.ast.RqlQuery.union,
b"stream.union(sequence[, sequence, ...][, interleave=True]) -> stream\narray.union(sequence[, sequence, ...][, interleave=True]) -> array\n\nMerge two or more sequences.\n\nThe optional `interleave` argument controls how the sequences will be merged:\n\n* `True`: results will be mixed together; this is the fastest setting, but ordering of elements is not guaranteed. (This is the default.)\n* `False`: input sequences will be appended to one another, left to right.\n* `\"field_name\"`: a string will be taken as the name of a field to perform a merge-sort on. The input sequences must be ordered _before_ being passed to `union`.\n\n*Example* Construct a stream of all heroes.\n\n r.table('marvel').union(r.table('dc')).run(conn)\n\n*Example* Combine four arrays into one.\n\n r.expr([1, 2]).union([3, 4], [5, 6], [7, 8, 9]).run(conn)\n \n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n*Example* Create a changefeed from the first example.\n\n r.table('marvel').union(r.table('dc')).changes().run(conn)\n\nNow, when any heroes are added, modified or deleted from either table, a change notification will be sent out.\n\n*Example* Merge-sort the tables of heroes, ordered by name.\n\n r.table('marvel').order_by('name').union(\n r.table('dc').order_by('name'), interleave='name'\n ).run(conn)\n",
),
(
rethinkdb.ast.RqlQuery.with_fields,
b"sequence.with_fields([selector1, selector2...]) -> stream\narray.with_fields([selector1, selector2...]) -> array\n\nPlucks one or more attributes from a sequence of objects, filtering out any objects in the sequence that do not have the specified fields. Functionally, this is identical to [has_fields](http://rethinkdb.com/api/python/has_fields/) followed by [pluck](http://rethinkdb.com/api/python/pluck/) on a sequence.\n\n*Example* Get a list of users and their posts, excluding any users who have not made any posts.\n\nExisting table structure:\n\n [\n { 'id': 1, 'user': 'bob', 'email': 'bob@foo.com', 'posts': [ 1, 4, 5 ] },\n { 'id': 2, 'user': 'george', 'email': 'george@foo.com' },\n { 'id': 3, 'user': 'jane', 'email': 'jane@foo.com', 'posts': [ 2, 3, 6 ] }\n ]\n\nCommand and output:\n\n r.table('users').with_fields('id', 'user', 'posts').run(conn)\n \n [\n { 'id': 1, 'user': 'bob', 'posts': [ 1, 4, 5 ] },\n { 'id': 3, 'user': 'jane', 'posts': [ 2, 3, 6 ] }\n ]\n\n*Example* Use the [nested field syntax](http://rethinkdb.com/docs/nested-fields/) to get a list of users with cell phone numbers in their contacts.\n\n r.table('users').with_fields('id', 'user', {contact: {'phone': 'work'}).run(conn)\n",
),
(
rethinkdb.ast.Table.delete,
b'table.delete([durability="hard", return_changes=False])\n -> object\nselection.delete([durability="hard", return_changes=False])\n -> object\nsingleSelection.delete([durability="hard", return_changes=False])\n -> object\n\nDelete one or more documents from a table.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n\nDelete returns an object that contains the following attributes:\n\n- `deleted`: the number of documents that were deleted.\n- `skipped`: the number of documents that were skipped. \nFor example, if you attempt to delete a batch of documents, and another concurrent query\ndeletes some of those documents first, they will be counted as skipped.\n- `errors`: the number of errors encountered while performing the delete.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `inserted`, `replaced`, and `unchanged`: all 0 for a delete operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `delete` operation. 
Each object will have two keys: `{"new_val": None, "old_val": <old value>}`.\n\n*Example* Delete a single document from the table `comments`.\n\n r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete().run(conn)\n\n*Example* Delete all documents from the table `comments`.\n\n r.table("comments").delete().run(conn)\n\n*Example* Delete all comments where the field `id_post` is `3`.\n\n r.table("comments").filter({"id_post": 3}).delete().run(conn)\n\n*Example* Delete a single document from the table `comments` and return its value.\n\n r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete(return_changes=True).run(conn)\n\nThe result will look like:\n\n {\n "deleted": 1,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": None,\n "old_val": {\n "id": "7eab9e63-73f1-4f33-8ce4-95cbea626f59",\n "author": "William",\n "comment": "Great post",\n "id_post": 3\n }\n }\n ],\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\n*Example* Delete all documents from the table `comments` without waiting for the\noperation to be flushed to disk.\n\n r.table("comments").delete(durability="soft"}).run(conn)\n',
),
(
rethinkdb.ast.Table.insert,
b'table.insert(object | [object1, object2, ...][, durability="hard", return_changes=False, conflict="error"])\n -> object\n\nInsert documents into a table. Accepts a single document or an array of\ndocuments.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In soft durability mode RethinkDB will acknowledge the write immediately after receiving and caching it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `conflict`: Determine handling of inserting documents with the same primary key as existing entries. Possible values are `"error"`, `"replace"` or `"update"`.\n - `"error"`: Do not insert the new document and record the conflict as an error. This is the default.\n - `"replace"`: [Replace](http://rethinkdb.com/api/python/replace/) the old document in its entirety with the new one.\n - `"update"`: [Update](http://rethinkdb.com/api/python/update/) fields of the old document with fields from the new one.\n\nIf `return_changes` is set to `True` or `"always"`, the `changes` array will follow the same order as the inserted documents. 
Documents in `changes` for which an error occurs (such as a key conflict) will have a third field, `error`, with an explanation of the error.\n\nInsert returns an object that contains the following attributes:\n\n- `inserted`: the number of documents successfully inserted.\n- `replaced`: the number of documents updated when `conflict` is set to `"replace"` or `"update"`.\n- `unchanged`: the number of documents whose fields are identical to existing documents with the same primary key when `conflict` is set to `"replace"` or `"update"`.\n- `errors`: the number of errors encountered while performing the insert.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `skipped`: 0 for an insert operation.\n- `generated_keys`: a list of generated primary keys for inserted documents whose primary keys were not specified (capped to 100,000).\n- `warnings`: if the field `generated_keys` is truncated, you will get the warning _"Too many generated keys (<X>), array truncated to 100000."_.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `insert` operation. 
Each object will have two keys: `{"new_val": <new value>, "old_val": None}`.\n\n*Example* Insert a document into the table `posts`.\n\n r.table("posts").insert({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }).run(conn)\n\n<!-- stop -->\n\nThe result will be:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\n*Example* Insert a document without a defined primary key into the table `posts` where the\nprimary key is `id`.\n\n r.table("posts").insert({\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }).run(conn)\n\nRethinkDB will generate a primary key and return it in `generated_keys`.\n\n {\n "deleted": 0,\n "errors": 0,\n "generated_keys": [\n "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n ],\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\nRetrieve the document you just inserted with:\n\n r.table("posts").get("dd782b64-70a7-43e4-b65e-dd14ae61d947").run(conn)\n\nAnd you will get back:\n\n {\n "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n "title": "Lorem ipsum",\n "content": "Dolor sit amet",\n }\n\n*Example* Insert multiple documents into the table `users`.\n\n r.table("users").insert([\n {"id": "william", "email": "william@rethinkdb.com"},\n {"id": "lara", "email": "lara@rethinkdb.com"}\n ]).run(conn)\n\n*Example* Insert a document into the table `users`, replacing the document if the document\nalready exists. 
\n\n r.table("users").insert(\n {"id": "william", "email": "william@rethinkdb.com"},\n conflict="replace"\n ).run(conn)\n\n*Example* Copy the documents from `posts` to `posts_backup`.\n\n r.table("posts_backup").insert( r.table("posts") ).run(conn)\n\n*Example* Get back a copy of the inserted document (with its generated primary key).\n\n r.table("posts").insert(\n {"title": "Lorem ipsum", "content": "Dolor sit amet"},\n return_changes=True\n ).run(conn)\n\nThe result will be\n\n {\n "deleted": 0,\n "errors": 0,\n "generated_keys": [\n "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n ],\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0,\n "changes": [\n {\n "old_val": None,\n "new_val": {\n "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }\n }\n ]\n }\n',
),
(
rethinkdb.ast.Table.replace,
b'table.replace(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nselection.replace(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nsingleSelection.replace(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\n\nReplace documents in a table. Accepts a JSON document or a ReQL expression,\nand replaces the original document with the new one. The new document must\nhave the same primary key as the original document.\n\nThe `replace` command can be used to both insert and delete documents. If\nthe "replaced" document has a primary key that doesn\'t exist in the table,\nthe document will be inserted; if an existing document is replaced with\n`None`, the document will be deleted. Since `update` and `replace` operations\nare performed atomically, this allows atomic inserts and deletes as well.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override\n the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In\n soft durability mode RethinkDB will acknowledge the write immediately after\n receiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects\n describing the changes made, only including the documents actually\n updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried\n to update whether or not the update was successful. (This was the behavior\n of `True` pre-2.0.)\n- `non_atomic`: if set to `True`, executes the replacement and distributes the\n result to replicas in a non-atomic fashion. 
This flag is required to perform\n non-deterministic updates, such as those that require reading data from\n another table.\n\nReplace returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were replaced.\n- `unchanged`: the number of documents that would have been modified, except\n that the new value was the same as the old value.\n- `inserted`: the number of new documents added. A document is considered inserted if its primary key did not exist in the table at the time of the `replace` operation.\n- `deleted`: the number of deleted documents when doing a replace with `None`.\n- `errors`: the number of errors encountered while performing the replace.\n- `first_error`: If errors were encountered, contains the text of the first\n error.\n- `skipped`: 0 for a replace operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of\n objects, one for each objected affected by the `replace` operation. Each\n object will have two keys: `{"new_val": <new value>, "old_val": <old value>}`.\n\n*Example* Replace the document with the primary key `1`.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "draft"\n }).run(conn)\n\n*Example* Remove the field `status` from all posts.\n\n r.table("posts").replace(lambda post:\n post.without("status")\n ).run(conn)\n\n*Example* Remove all the fields that are not `id`, `title` or `content`.\n\n r.table("posts").replace(lambda post:\n post.pluck("id", "title", "content")\n ).run(conn)\n\n*Example* Replace the document with the primary key `1` using soft durability.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "draft"\n }, durability="soft").run(conn)\n\n*Example* Replace the document with the primary key `1` and return the values of the document before\nand after the replace operation.\n\n r.table("posts").get(1).replace({\n 
"id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "published"\n }, return_changes=True).run(conn)\n\nThe result will have a `changes` field:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": {\n "id":1,\n "title": "Lorem ipsum"\n "content": "Aleas jacta est",\n "status": "published",\n },\n "old_val": {\n "id":1,\n "title": "Lorem ipsum"\n "content": "TODO",\n "status": "draft",\n "author": "William",\n }\n }\n ], \n "replaced": 1,\n "skipped": 0,\n "unchanged": 0\n }\n',
),
(
rethinkdb.ast.Table.sync,
b"table.sync() -> object\n\n`sync` ensures that writes on a given table are written to permanent storage. Queries\nthat specify soft durability (`durability='soft'`) do not give such guarantees, so\n`sync` can be used to ensure the state of these queries. A call to `sync` does not return\nuntil all previous writes to the table are persisted.\n\nIf successful, the operation returns an object: `{\"synced\": 1}`.\n\n*Example* After having updated multiple heroes with soft durability, we now want to wait\nuntil these changes are persisted.\n\n r.table('marvel').sync().run(conn)\n\n",
),
(
rethinkdb.ast.Table.update,
b'table.update(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nselection.update(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nsingleSelection.update(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\n\nUpdate JSON documents in a table. Accepts a JSON document, a ReQL expression, or a combination of the two.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In soft durability mode RethinkDB will acknowledge the write immediately after receiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `non_atomic`: if set to `True`, executes the update and distributes the result to replicas in a non-atomic fashion. 
This flag is required to perform non-deterministic updates, such as those that require reading data from another table.\n\nUpdate returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were updated.\n- `unchanged`: the number of documents that would have been modified except the new value was the same as the old value.\n- `skipped`: the number of documents that were skipped because the document didn\'t exist.\n- `errors`: the number of errors encountered while performing the update.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `inserted`: 0 for an update operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `update` operation. Each object will have two keys: `{"new_val": <new value>, "old_val": <old value>}`.\n\n*Example* Update the status of the post with `id` of `1` to `published`.\n\n r.table("posts").get(1).update({"status": "published"}).run(conn)\n\n*Example* Update the status of all posts to `published`.\n\n r.table("posts").update({"status": "published"}).run(conn)\n\n*Example* Update the status of all the posts written by William.\n\n r.table("posts").filter({"author": "William"}).update({"status": "published"}).run(conn)\n\n*Example* Increment the field `view` of the post with `id` of `1`.\nThis query will throw an error if the field `views` doesn\'t exist.\n\n r.table("posts").get(1).update({\n "views": r.row["views"]+1\n }).run(conn)\n\n*Example* Increment the field `view` of the post with `id` of `1`.\nIf the field `views` does not exist, it will be set to `0`.\n\n r.table("posts").get(1).update({\n "views": (r.row["views"]+1).default(0)\n }).run(conn)\n\n*Example* Perform a conditional update. 
\nIf the post has more than 100 views, set the `type` of a post to `hot`, else set it to `normal`.\n\n r.table("posts").get(1).update(lambda post:\n r.branch(\n post["views"] > 100,\n {"type": "hot"},\n {"type": "normal"}\n )\n ).run(conn)\n\n*Example* Update the field `num_comments` with the result of a sub-query. Because this update is not atomic, you must pass the `non_atomic` flag.\n\n r.table("posts").get(1).update({\n "num_comments": r.table("comments").filter({"id_post": 1}).count()\n }, non_atomic=True).run(conn)\n\nIf you forget to specify the `non_atomic` flag, you will get a `ReqlRuntimeError`:\n\nReqlRuntimeError: Could not prove function deterministic. Maybe you want to use the non_atomic flag? \n\n*Example* Update the field `num_comments` with a random value between 0 and 100. This update cannot be proven deterministic because of `r.js` (and in fact is not), so you must pass the `non_atomic` flag.\n\n r.table("posts").get(1).update({\n "num_comments": r.js("Math.floor(Math.random()*100)")\n }, non_atomic=True).run(conn)\n\n*Example* Update the status of the post with `id` of `1` using soft durability.\n\n r.table("posts").get(1).update({status: "published"}, durability="soft").run(conn)\n\n*Example* Increment the field `views` and return the values of the document before and after the update operation.\n\n r.table("posts").get(1).update({\n "views": r.row["views"]+1\n }, return_changes=True).run(conn)\n\nThe result will now include a `changes` field:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": {\n "id": 1,\n "author": "Julius_Caesar",\n "title": "Commentarii de Bello Gallico",\n "content": "Aleas jacta est",\n "views": 207\n },\n "old_val": {\n "id": 1,\n "author": "Julius_Caesar",\n "title": "Commentarii de Bello Gallico",\n "content": "Aleas jacta est",\n "views": 206\n }\n }\n ],\n "replaced": 1,\n "skipped": 0,\n "unchanged": 0\n }\n\nThe `update` command supports RethinkDB\'s nested field syntax to update 
subdocuments. Consider a user table with contact information in this format:\n\n {\n "id": 10001,\n "name": "Bob Smith",\n "contact": {\n "phone": {\n "work": "408-555-1212",\n "home": "408-555-1213",\n "cell": "408-555-1214"\n },\n "email": {\n "work": "bob@smith.com",\n "home": "bobsmith@example.com",\n "other": "bobbys@moosecall.net"\n },\n "im": {\n "skype": "Bob Smith",\n "aim": "bobmoose",\n "icq": "nobodyremembersicqnumbers"\n }\n },\n "notes": [\n {\n "date": r.time(2014,1,1,\'Z\'),\n "from": "John Doe",\n "subject": "My name is even more boring than Bob\'s"\n },\n {\n "date": r.time(2014,2,2,\'Z\'),\n "from": "Bob Smith Sr",\n "subject": "Happy Second of February"\n }\n ]\n }\n\n*Example* Update Bob Smith\'s cell phone number.\n\n r.table("users").get(10001).update(\n {"contact": {"phone": {"cell": "408-555-4242"}}}\n ).run(conn)\n\n*Example* Add another note to Bob Smith\'s record.\n\n new_note = {\n "date": r.now(),\n "from": "Inigo Montoya",\n "subject": "You killed my father"\n }\n r.table("users").get(10001).update(\n {"notes": r.row["notes"].append(new_note)}\n ).run(conn)\n\n*Example* Send a note to every user with an ICQ number.\n\n icq_note = {\n "date": r.now(),\n "from": "Admin",\n "subject": "Welcome to the future"\n }\n r.table("users").filter(\n r.row.has_fields({"contact": {"im": "icq"}})\n ).update(\n {"notes": r.row["notes"].append(icq_note)}\n ).run(conn)\n\n*Example* Replace all of Bob\'s IM records. Normally, `update` will merge nested documents together; to replace the entire `"im"` document, use the literal command.\n\n r.table(\'users\').get(10001).update(\n {"contact": {"im": r.literal({"aim": "themoosemeister"})}}\n ).run(conn)\n',
),
]
# Attach the docstrings collected in ``docsSource`` to the driver's query
# methods.  (Fix: the original final line had stray packaging-metadata text
# fused onto it, which was a syntax error.)
for function, text in docsSource:
    try:
        # The docs are stored as UTF-8 byte strings; convert them to native
        # strings.  On Python 2, ``str()`` of a non-ASCII unicode value raises
        # UnicodeEncodeError, in which case the raw bytes are kept as-is.
        text = str(text.decode("utf-8"))
    except UnicodeEncodeError:
        pass
    # Bound/unbound methods expose their writable docstring on the underlying
    # function object; plain functions take it directly.
    if hasattr(function, "__func__"):
        function.__func__.__doc__ = text
    else:
        function.__doc__ = text
import base64
import binascii
import hashlib
import hmac
import struct
import sys
import threading
from random import SystemRandom
import six
from rethinkdb import ql2_pb2
from rethinkdb.errors import ReqlAuthError, ReqlDriverError
from rethinkdb.helpers import chain_to_bytes, decode_utf8
from rethinkdb.logger import default_logger
# Python 2/3 compatibility: ``xrange`` was removed in Python 3, where
# ``range`` is already lazy.  Alias it so later code (the PBKDF2 iteration
# loop) can use ``xrange`` on both interpreter lines.
try:
    xrange
except NameError:
    xrange = range
def compare_digest(digest_a, digest_b):
    """Constant-time digest comparison, fallback for ``hmac.compare_digest``.

    Used on interpreters (Python < 2.7.7 / < 3.3) whose ``hmac`` module does
    not provide ``compare_digest``.  The comparison loop always runs over the
    full length of one input so the execution time does not reveal how many
    leading bytes match.

    :param digest_a: first digest (bytes)
    :param digest_b: second digest (bytes)
    :return: True if the digests are equal, False otherwise
    """
    # Iterating ``bytes`` yields ints on Python 3 but 1-char strings on
    # Python 2, hence the two xor helper variants.
    if sys.version_info[0] == 3:
        def xor_bytes(digest_a, digest_b):
            return digest_a ^ digest_b
    else:
        def xor_bytes(digest_a, digest_b, _ord=ord):
            return _ord(digest_a) ^ _ord(digest_b)

    # When lengths differ, compare digest_b against itself (so the loop still
    # does length-proportional work) but force a non-zero result.
    left = None
    right = digest_b
    if len(digest_a) == len(digest_b):
        left = digest_a
        result = 0
    if len(digest_a) != len(digest_b):
        left = digest_b
        result = 1
    for l, r in zip(left, right):
        result |= xor_bytes(l, r)
    return result == 0
def pbkdf2_hmac(hash_name, password, salt, iterations):
    """PBKDF2-HMAC-SHA256 fallback for interpreters without ``hashlib.pbkdf2_hmac``.

    Derives a 32-byte key from ``password`` and ``salt`` using the given
    iteration count (RFC 2898, single output block).  Results are memoised in
    the per-thread ``HandshakeV1_0.PBKDF2_CACHE`` keyed on
    ``(password, salt, iterations)``.

    :param hash_name: must be ``"sha256"``; anything else raises AssertionError
    :param password: password bytes used as the HMAC key
    :param salt: salt bytes
    :param iterations: PBKDF2 iteration count
    :return: 32-byte derived key
    """
    if hash_name != "sha256":
        raise AssertionError(
            'Hash name {hash_name} is not equal with "sha256"'.format(
                hash_name=hash_name
            )
        )

    cache_key = (password, salt, iterations)
    cached = HandshakeV1_0.PBKDF2_CACHE.get(cache_key)
    if cached is not None:
        return cached

    prf = hmac.new(password, None, hashlib.sha256)

    def prf_digest(message):
        # HMAC objects are single-use once finalised, so work on a copy.
        clone = prf.copy()
        clone.update(message)
        return clone.digest()

    # U_1 = PRF(salt || INT(1)); the derived key is the xor of U_1..U_c.
    block = prf_digest(salt + b"\x00\x00\x00\x01")
    accumulator = int(binascii.hexlify(block), 16)
    for _ in range(iterations - 1):
        block = prf_digest(block)
        accumulator ^= int(binascii.hexlify(block), 16)

    try:
        derived = binascii.unhexlify(bytes("%064x" % accumulator, "ascii"))
    except TypeError:
        # Python 2: ``bytes`` is ``str`` and takes no encoding argument.
        derived = binascii.unhexlify(bytes("%064x" % accumulator))

    HandshakeV1_0.PBKDF2_CACHE.set(cache_key, derived)
    return derived
class LocalThreadCache(threading.local):
    """A simple key/value cache whose storage is private to each thread.

    Subclassing ``threading.local`` means ``__init__`` runs once per thread,
    so every thread starts with its own empty mapping.
    """

    def __init__(self):
        self._cache = {}

    def set(self, key, val):
        """Store ``val`` under ``key`` for the current thread."""
        self._cache[key] = val

    def get(self, key):
        """Return the value stored under ``key``, or ``None`` if absent."""
        return self._cache.get(key, None)
class HandshakeV1_0(object):
"""
RethinkDB client drivers are responsible for serializing queries, sending them to the server using the
ReQL wire protocol, and receiving responses from the server and returning them to the calling application.
The client sends the protocol version, authentication method, and authentication as a null-terminated JSON
response. RethinkDB currently supports only one authentication method, SCRAM-SHA-256, as specified in IETF
RFC 7677 and RFC 5802. The RFC is followed with the exception of error handling (RethinkDB uses its own
higher level error reporting rather than the e= field). RethinkDB does not support channel binding and clients
should not request this. The value of "authentication" is the "client-first-message" specified in RFC 5802
(the channel binding flag, optional SASL authorization identity, username (n=), and random nonce (r=).
More info: https://rethinkdb.com/docs/writing-drivers/
"""
VERSION = ql2_pb2.VersionDummy.Version.V1_0
PROTOCOL = ql2_pb2.VersionDummy.Protocol.JSON
PBKDF2_CACHE = LocalThreadCache()
def __init__(self, json_decoder, json_encoder, host, port, username, password):
    """Set up the SCRAM-SHA-256 handshake state for one connection attempt.

    :param json_decoder: decoder used to parse the server's JSON responses
    :param json_encoder: encoder used to serialise client JSON messages
    :param host: server hostname (used when raising auth/driver errors)
    :param port: server port (used when raising auth/driver errors)
    :param username: user name; ``=`` and ``,`` are escaped as ``=3D`` and
        ``=2C`` per RFC 5802 before being embedded in the SCRAM message
    :param password: password, converted to bytes for key derivation
    """
    self._json_decoder = json_decoder
    self._json_encoder = json_encoder
    self._host = host
    self._port = port
    # RFC 5802 "saslname" escaping of the user name.
    self._username = (
        username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C")
    )
    self._password = six.b(password)
    # Prefer the stdlib implementations when available; fall back to the
    # pure-Python versions defined in this module otherwise.
    self._compare_digest = self._get_compare_digest()
    self._pbkdf2_hmac = self._get_pbkdf2_hmac()
    self._protocol_version = 0
    self._random_nonce = None
    self._first_client_message = None
    self._server_signature = None
    # Step counter driving the handshake state machine.
    self._state = 0
@staticmethod
def _get_compare_digest():
    """
    Return ``hmac.compare_digest`` when the running interpreter provides it
    (Python 2.7.7+/3.3+); otherwise fall back to the pure-Python
    constant-time implementation defined in this module.
    """
    if hasattr(hmac, "compare_digest"):
        return hmac.compare_digest
    return compare_digest
@staticmethod
def _get_pbkdf2_hmac():
    """
    Return ``hashlib.pbkdf2_hmac`` when the running interpreter provides it
    (Python 2.7.8+/3.4+); otherwise fall back to the pure-Python
    implementation defined in this module.
    """
    if hasattr(hashlib, "pbkdf2_hmac"):
        return hashlib.pbkdf2_hmac
    return pbkdf2_hmac
@staticmethod
def _get_authentication_and_first_client_message(response):
    """
    Split the server's ``authentication`` payload into its raw bytes and a
    mapping of SCRAM attributes.

    The payload is a comma-separated list of ``key=value`` pairs (e.g.
    ``r=<nonce>,s=<salt>,i=<iterations>``); values may themselves contain
    ``=``, so each pair is split only on the first ``=``.

    :param response: decoded JSON response dict from the server
    :return: tuple of (raw message bytes, {attribute: value} bytes dict)
    """
    message = response["authentication"].encode("ascii")
    attributes = {}
    for pair in message.split(b","):
        key, value = pair.split(b"=", 1)
        attributes[key] = value
    return message, attributes
def _next_state(self):
    """Advance the handshake state machine to its next step."""
    self._state = self._state + 1
def _decode_json_response(self, response, with_utf8=False):
"""
Get decoded json response from response.
:param response: Response from the database
:param with_utf8: UTF-8 decode response before json decoding
:raises: ReqlDriverError | ReqlAuthError
:return: Json decoded response of the original response
"""
if with_utf8:
response = decode_utf8(response)
json_response = self._json_decoder.decode(response)
if not json_response.get("success"):
if 10 <= json_response["error_code"] <= 20:
raise ReqlAuthError(json_response["error"], self._host, self._port)
raise ReqlDriverError(json_response["error"])
return json_response
def _init_connection(self, response):
    """
    Build the very first message of the handshake: the protocol magic
    number followed by the NUL-terminated authentication JSON (the
    client-first-message is sent eagerly as an optimization).

    :param response: must be None — nothing has been sent yet
    :raises: ReqlDriverError if a response arrives unexpectedly
    :return: bytes to send to the database
    """
    if response is not None:
        raise ReqlDriverError("Unexpected response")
    # 18 random bytes, base64-encoded, become the client nonce (r=).
    rng = SystemRandom()
    raw_nonce = bytes(bytearray(rng.getrandbits(8) for _ in range(18)))
    self._random_nonce = base64.standard_b64encode(raw_nonce)
    self._first_client_message = chain_to_bytes(
        "n=", self._username, ",r=", self._random_nonce
    )
    # "n,," is the GS2 header: no channel binding, no authzid.
    auth_json = self._json_encoder.encode(
        {
            "protocol_version": self._protocol_version,
            "authentication_method": "SCRAM-SHA-256",
            "authentication": chain_to_bytes(
                "n,,", self._first_client_message
            ).decode("ascii"),
        }
    )
    self._next_state()
    return chain_to_bytes(
        struct.pack("<L", self.VERSION), auth_json.encode("utf-8"), b"\0"
    )
def _read_response(self, response):
"""
Read response of the server. Due to we've already sent the initial JSON, and only support a single
protocol version at the moment thus we simply read the next response and return an empty string as a
message.
:param response: Response from the database
:raises: ReqlDriverError | ReqlAuthError
:return: An empty string
"""
json_response = self._decode_json_response(response)
min_protocol_version = json_response["min_protocol_version"]
max_protocol_version = json_response["max_protocol_version"]
if not min_protocol_version <= self._protocol_version <= max_protocol_version:
raise ReqlDriverError(
"Unsupported protocol version {version}, expected between {min} and {max}".format(
version=self._protocol_version,
min=min_protocol_version,
max=max_protocol_version,
)
)
self._next_state()
return ""
def _prepare_auth_request(self, response):
    """
    Put together the SCRAM client-final-message (RFC 5802) from the
    server's challenge: verify the server nonce, derive the salted
    password, compute the client proof, and remember the expected
    server signature for later verification.

    :param response: Response from the database
    :raises: ReqlDriverError | ReqlAuthError
    :return: NUL-terminated JSON authentication request to send
    """
    json_response = self._decode_json_response(response, with_utf8=True)
    (
        first_client_message,
        authentication,
    ) = self._get_authentication_and_first_client_message(json_response)
    # The server's combined nonce (r=) must start with the nonce we
    # sent, otherwise the challenge is not a reply to our message.
    random_nonce = authentication[b"r"]
    if not random_nonce.startswith(self._random_nonce):
        raise ReqlAuthError("Invalid nonce from server", self._host, self._port)
    # SaltedPassword := PBKDF2-HMAC-SHA256(password, salt s=, rounds i=)
    salted_password = self._pbkdf2_hmac(
        "sha256",
        self._password,
        base64.standard_b64decode(authentication[b"s"]),
        int(authentication[b"i"]),
    )
    # "c=biws" is base64("n,,") — the GS2 header with no channel binding.
    message_without_proof = chain_to_bytes("c=biws,r=", random_nonce)
    # AuthMessage := client-first-bare , server-first , client-final-
    # without-proof (comma-joined, per RFC 5802 section 3).
    auth_message = b",".join(
        (self._first_client_message, first_client_message, message_without_proof)
    )
    # ServerSignature := HMAC(ServerKey, AuthMessage); stored so the
    # server's final "v=" value can be checked in _read_auth_response.
    self._server_signature = hmac.new(
        hmac.new(salted_password, b"Server Key", hashlib.sha256).digest(),
        auth_message,
        hashlib.sha256,
    ).digest()
    client_key = hmac.new(salted_password, b"Client Key", hashlib.sha256).digest()
    client_signature = hmac.new(
        hashlib.sha256(client_key).digest(), auth_message, hashlib.sha256
    ).digest()
    # ClientProof := ClientKey XOR ClientSignature, byte by byte.
    # NOTE(review): the inner loop variable shadows the outer
    # ``random_nonce``; it is actually a byte of client_signature.
    client_proof = struct.pack(
        "32B",
        *(
            l ^ random_nonce
            for l, random_nonce in zip(
                struct.unpack("32B", client_key),
                struct.unpack("32B", client_signature),
            )
        )
    )
    # client-final-message: channel binding + nonce + proof (p=),
    # wrapped in the driver's JSON envelope and NUL-terminated.
    authentication_request = chain_to_bytes(
        self._json_encoder.encode(
            {
                "authentication": chain_to_bytes(
                    message_without_proof,
                    ",p=",
                    base64.standard_b64encode(client_proof),
                ).decode("ascii")
            }
        ),
        b"\0",
    )
    self._next_state()
    return authentication_request
def _read_auth_response(self, response):
"""
Read the authentication request's response sent by the database
and validate the server signature which was returned.
:param response: Response from the database
:raises: ReqlDriverError | ReqlAuthError
:return: None
"""
json_response = self._decode_json_response(response, with_utf8=True)
(
first_client_message,
authentication,
) = self._get_authentication_and_first_client_message(json_response)
server_signature = base64.standard_b64decode(authentication[b"v"])
if not self._compare_digest(server_signature, self._server_signature):
raise ReqlAuthError("Invalid server signature", self._host, self._port)
self._next_state()
def reset(self):
    """Return the handshake to its initial, not-yet-started state."""
    for attribute in (
        "_random_nonce",
        "_first_client_message",
        "_server_signature",
    ):
        setattr(self, attribute, None)
    self._state = 0
def next_message(self, response):
    """
    Feed the server's latest response into the handshake state machine
    and get back the next message to send (or "" / None when there is
    nothing to send for that step).

    :param response: raw bytes from the server, or None before any
        message has been sent
    :raises: ReqlDriverError on an unexpected state
    """
    if response is not None:
        response = response.decode("utf-8")
    # One handler per state, in handshake order.
    handlers = {
        0: self._init_connection,
        1: self._read_response,
        2: self._prepare_auth_request,
        3: self._read_auth_response,
    }
    handler = handlers.get(self._state)
    if handler is None:
        raise ReqlDriverError("Unexpected handshake state")
    return handler(response)
# Attach generated, user-facing docstrings to the driver API at import
# time. get_unbound_func unwraps Python-2-style unbound methods so the
# assignment targets the underlying function in a 2/3-compatible way.
import rethinkdb
from ._compat import get_unbound_func

# NOTE: the string literals below are runtime data shown by help();
# they are assigned verbatim.
get_unbound_func(rethinkdb.net.Cursor.close).__doc__ = u'Close a cursor. Closing a cursor cancels the corresponding query and frees the memory\nassociated with the open request.\n\n*Example:* Close a cursor.\n\n>>> cursor.close()\n'
get_unbound_func(rethinkdb.net.Connection.close).__doc__ = u'Close an open connection. Closing a connection waits until all\noutstanding requests have finished and then frees any open resources\nassociated with the connection. If `noreply_wait` is set to `false`,\nall outstanding requests are canceled immediately.\n\nClosing a connection cancels all outstanding requests and frees the\nmemory associated with any open cursors.\n\n*Example:* Close an open connection, waiting for noreply writes to finish.\n\n>>> conn.close()\n\n*Example:* Close an open connection immediately.\n\n>>> conn.close(noreply_wait=False)\n'
# rethinkdb.connect is a plain function, so its __doc__ is set directly.
(rethinkdb.connect).__doc__ = u"Create a new connection to the database server. The keyword arguments are:\n\n- `host`: host of the RethinkDB instance. The default value is `localhost`.\n- `port`: the driver port, by default `28015`.\n- `db`: the database used if not explicitly specified in a query, by default `test`.\n- `auth_key`: the authentification key, by default the empty string.\n- `timeout`: timeout period for the connection to be opened, by default `20` (seconds).\n\nIf the connection cannot be established, a `RqlDriverError` exception will be thrown.\n\n*Example:* Opens a connection using the default host and port but specifying the default database.\n\n>>> conn = r.connect(db='marvel')\n\n*Example:* Opens a new connection to the database.\n\n>>> conn = r.connect(host = 'localhost',\n... port = 28015,\n... db = 'heroes',\n... auth_key = 'hunter2')\n\n"
get_unbound_func(rethinkdb.net.Connection.noreply_wait).__doc__ = u'`noreply_wait` ensures that previous queries with the `noreply` flag have been processed\nby the server. Note that this guarantee only applies to queries run on the given connection.\n\n*Example:* We have previously run queries with the `noreply` argument set to `True`. Now\nwait until the server has processed them.\n\n>>> conn.noreply_wait()\n\n'
(rethinkdb).__doc__ = u'The top-level ReQL namespace.\n\n*Example:* Setup your top-level namespace.\n\n>>> import rethinkdb as r\n\n'
get_unbound_func(rethinkdb.net.Connection.reconnect).__doc__ = u'Close and reopen a connection. Closing a connection waits until all\noutstanding requests have finished. If `noreply_wait` is set to\n`false`, all outstanding requests are canceled immediately.\n\n*Example:* Cancel outstanding requests/queries that are no longer needed.\n\n>>> conn.reconnect(noreply_wait=False)\n'
get_unbound_func(rethinkdb.net.Connection.repl).__doc__ = u"Set the default connection to make REPL use easier. Allows calling\n`.run()` on queries without specifying a connection.\n\nConnection objects are not thread-safe and REPL connections should not\nbe used in multi-threaded environments.\n\n*Example:* Set the default connection for the REPL, then call\n`run()` without specifying the connection.\n\n>>> r.connect(db='marvel').repl()\n... r.table('heroes').run()\n"
get_unbound_func(rethinkdb.ast.RqlQuery.run).__doc__ = u'Run a query on a connection, returning either a single JSON result or\na cursor, depending on the query.\n\nThe optional arguments are:\n\n- `use_outdated`: whether or not outdated reads are OK (default: `False`).\n- `time_format`: what format to return times in (default: `\'native\'`).\n Set this to `\'raw\'` if you want times returned as JSON objects for exporting.\n- `profile`: whether or not to return a profile of the query\'s\n execution (default: `false`).\n- `durability`: possible values are `\'hard\'` and `\'soft\'`. In soft durability mode RethinkDB\nwill acknowledge the write immediately after receiving it, but before the write has\nbeen committed to disk.\n- `group_format`: what format to return `grouped_data` and `grouped_streams` in (default: `\'native\'`).\n Set this to `\'raw\'` if you want the raw pseudotype.\n\n*Example:* Run a query on the connection `conn` and print out every\nrow in the result.\n\n>>> for doc in r.table(\'marvel\').run(conn):\n... print doc\n\n*Example:* If you are OK with potentially out of date data from all\nthe tables involved in this query and want potentially faster reads,\npass a flag allowing out of date data in an options object. Settings\nfor individual tables will supercede this global setting for all\ntables in the query.\n\n>>> r.table(\'marvel\').run(conn, use_outdated=True)\n\n*Example:* If you just want to send a write and forget about it, you\ncan set `noreply` to true in the options. In this case `run` will\nreturn immediately.\n\n>>> r.table(\'marvel\').run(conn, noreply=True)\n\n*Example:* If you want to specify whether to wait for a write to be\nwritten to disk (overriding the table\'s default settings), you can set\n`durability` to `\'hard\'` or `\'soft\'` in the options.\n\n>>> r.table(\'marvel\')\n... .insert({ \'superhero\': \'Iron Man\', \'superpower\': \'Arc Reactor\' })\n... 
.run(conn, noreply=True, durability=\'soft\')\n\n*Example:* If you do not want a time object to be converted to a\nnative date object, you can pass a `time_format` flag to prevent it\n(valid flags are "raw" and "native"). This query returns an object\nwith two fields (`epoch_time` and `$reql_type$`) instead of a native date\nobject.\n\n>>> r.now().run(conn, time_format="raw")\n\n'
get_unbound_func(rethinkdb.net.Connection.use).__doc__ = u"Change the default database on this connection.\n\n*Example:* Change the default database so that we don't need to\nspecify the database when referencing a table.\n\n>>> conn.use('marvel')\n... r.table('heroes').run(conn) # refers to r.db('marvel').table('heroes')\n"
get_unbound_func(rethinkdb.ast.RqlQuery.avg).__doc__ = u"Averages all the elements of a sequence. If called with a field name,\naverages all the values of that field in the sequence, skipping\nelements of the sequence that lack that field. If called with a\nfunction, calls that function on every element of the sequence and\naverages the results, skipping elements of the sequence where that\nfunction returns `None` or a non-existence error.\n\nProduces a non-existence error when called on an empty sequence. You\ncan handle this case with `default`.\n\n*Example:* What's the average of 3, 5, and 7?\n\n>>> r.expr([3, 5, 7]).avg().run(conn)\n\n*Example:* What's the average number of points scored in a game?\n\n>>> r.table('games').avg('points').run(conn)\n\n*Example:* What's the average number of points scored in a game,\ncounting bonus points?\n\n>>> r.table('games').avg(lambda game:\n... game['points'] + game['bonus_points']\n... ).run(conn)\n\n*Example:* What's the average number of points scored in a game?\n(But return `None` instead of erroring of there are no games where\npoints have been scored.)\n\n>>> r.table('games').avg('points').default(None).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.contains).__doc__ = u"When called with values, returns `True` if a sequence contains all the\nspecified values. When called with predicate functions, returns `True`\nif for each predicate there exists at least one element of the stream\nwhere that predicate returns `True`.\n\n*Example:* Has Iron Man ever fought Superman?\n\n>>> r.table('marvel').get('ironman')['opponents'].contains('superman').run(conn)\n\n*Example:* Has Iron Man ever defeated Superman in battle?\n\n>>> r.table('marvel').get('ironman')['battles'].contains(lambda battle:\n... (battle['winner'] == 'ironman') & (battle['loser'] == 'superman')\n... ).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.count).__doc__ = u"Counts the number of elements in a sequence. If called with a value,\ncounts the number of times that value occurs in the sequence. If\ncalled with a predicate function, counts the number of elements in the\nsequence where that function returns `True`.\n\n*Example:* Count the number of users.\n\n>>> r.table('users').count().run(conn)\n\n*Example:* Count the number of 18 year old users.\n\n>>> r.table('users')['age'].count(18).run(conn)\n\n*Example:* Count the number of users over 18.\n\n>>> r.table('users')['age'].count(lambda age: age > 18).run(conn)\n\n>>> r.table('users').count(lambda user: user['age'] > 18).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.distinct).__doc__ = u"Removes duplicate elements from a sequence. Returns an array even\nwhen called on a stream. Meant for use on small sequences.\n\n*Example:* Which unique villains have been vanquished by marvel heroes?\n\n>>> r.table('marvel').concat_map(lambda hero: hero['villainList']).distinct().run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.group).__doc__ = u'Takes a stream and partitions it into multiple groups based on the\nfields or functions provided.\n\n*Example:* Grouping games by player.\n\nSuppose that the table `games` has the following data:\n\n>>> [\n... {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n... {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n... {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n... {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n... ]\n\nGrouping games by player can be done with:\n\n>>> r.table(\'games\').group(\'player\').run(conn)\n\nResult:\n\n>>> {\n... "Alice": [\n... {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n... {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n... ],\n... "Bob": [\n... {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n... {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n... ]\n... }\n\nCommands chained after `group` will be called on each of these grouped\nsub-streams, producing grouped data.\n\n*Example:* What is each player\'s best game?\n\n>>> r.table(\'games\').group(\'player\').max(\'points\').run(conn)\n\nResult:\n\n>>> {\n... "Alice": {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n... "Bob": {"id": 2, "player": "Bob", "points": 15, "type": "ranked"}\n... }\n\nCommands chained onto grouped data will operate on each grouped datum,\nproducing more grouped data.\n\n*Example:* What is the maximum number of points scored by each player?\n\n>>> r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].run(conn)\n\nResult:\n\n>>> {\n... "Alice": 7,\n... "Bob": 15\n... }\n\nYou can also group by more than one field.\n\n*Example:* What is the maximum number of points scored by each\nplayer for each game type?\n\n>>> r.table(\'games\').group(\'player\', \'type\').max(\'points\')[\'points\'].run(conn)\n\nResult:\n\n>>> {\n... ("Alice", "free"): 7,\n... ("Bob", "free"): 10,\n... 
("Bob", "ranked"): 15\n... }\n\nYou can also group by a function.\n\n*Example:* What is the maximum number of points scored by each\nplayer for each game type?\n\n>>> r.table(\'games\')\n... .group(lambda game:\n... game.pluck(\'player\', \'type\')\n... ).max(\'points\')[\'points\'].run(conn)\n\nResult:\n\n>>> {\n... frozenset([(\'player\', \'Alice\'), (\'type\', \'free\')]): 7,\n... frozenset([(\'player\', \'Bob\'), (\'type\', \'free\')]): 10,\n... frozenset([(\'player\', \'Bob\'), (\'type\', \'ranked\')]): 15,\n... }\n\nYou can also group by an index.\n\n*Example:* What is the maximum number of points scored by game type?\n\n>>> r.table(\'games\').group(index=\'type\').max(\'points\')[\'points\'].run(conn)\n\n>>> {\n... "free": 10,\n... "ranked": 15\n... }\n\nIf you want to operate on all the groups rather than operating on each\ngroup (e.g. if you want to order the groups by their reduction), you\ncan use [ungroup](/api/python/ungroup/) to turn a grouped stream or\ngrouped data into an array of objects representing the groups.\n\n*Example:* Ungrouping grouped data.\n\n>>> r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].ungroup().run(conn)\n\nResult:\n\n>>> [\n... {\n... "group": "Alice",\n... "reduction": 7\n... },\n... {\n... "group": "Bob",\n... "reduction": 15\n... }\n... ]\n\nUngrouping is useful e.g. for ordering grouped data, or for inserting\ngrouped data into a table.\n\n*Example:* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\n>>> r.table(\'games\')\n... .group(\'player\').max(\'points\')[\'points\']\n... .ungroup().order_by(r.desc(\'reduction\')).run(conn)\n\n>>> [\n... {\n... "group": "Bob",\n... "reduction": 15\n... },\n... {\n... "group": "Alice",\n... "reduction": 7\n... }\n... ]\n\nWhen grouped data are returned to the client, they are transformed\ninto a client-specific native type. (Something similar is done with\n[times](/docs/dates-and-times/).) 
In Python, grouped data are\ntransformed into a `dictionary`. If the group value is an `array`, the\nkey is converted to a `tuple`. If the group value is a `dictionary`,\nit will be converted to a `frozenset`.\n\nIf you instead want to receive the raw\npseudotype from the server (e.g. if you\'re planning to serialize the\nresult as JSON), you can specify `group_format: \'raw\'` as an optional\nargument to `run`:\n\n*Example:* Get back the raw `GROUPED_DATA` pseudotype.\n\n>>> r.table(\'games\').group(\'player\').avg(\'points\').run(conn, group_format=\'raw\')\n\n>>> {\n... "$reql_type$": "GROUPED_DATA",\n... "data": [\n... ["Alice", 4.5],\n... ["Bob", 12.5]\n... ]\n... }\n\nNot passing the `group_format` flag would return:\n\n>>> {\n... "Alice": 4.5,\n... "Bob": 12.5\n... }\n\nYou might also want to use the [ungroup](/api/python/ungroup/)\ncommand (see above), which will turn the grouped data into an array of\nobjects on the server.\n\nIf you run a query that returns a grouped stream, it will be\nautomatically converted to grouped data before being sent back to you\n(there is currently no efficient way to stream groups from RethinkDB).\nThis grouped data is subject to the array size limit (which means you\ncan\'t have more than 100,000 groups or 100,000 rows in the output).\n\nIn general, operations on grouped streams will be efficiently\ndistributed, and operations on grouped data won\'t be. You can figure\nout what you\'re working with by putting `type_of` on the end of your\nquery. Below are efficient and inefficient examples.\n\n*Example:* Efficient operation.\n\n>>> # r.table(\'games\').group(\'player\').type_of().run(conn)\n... # Returns "GROUPED_STREAM"\n... r.table(\'games\').group(\'player\').min(\'points\').run(conn) # EFFICIENT\n\n*Example:* Inefficient operation.\n\n>>> # r.table(\'games\').group(\'player\').order_by(\'score\').type_of().run(conn)\n... # Returns "GROUPED_DATA"\n... 
r.table(\'games\').group(\'player\').order_by(\'score\').nth(0).run(conn) # INEFFICIENT\n\nWhat does it mean to be inefficient here? When operating on grouped\ndata rather than a grouped stream, *all* of the data has to be\navailable on the node processing the query. This means that the\noperation will only use one machine\'s resources, and will require\nmemory proportional to the size of the grouped data it\'s operating\non. (In the case of the `order_by` in the inefficient example, that\nmeans memory proportional **to the size of the table**.) The array\nlimit is also enforced for grouped data, so the `order_by` example\nwould fail for tables with more than 100,000 rows.\n\n*Example:* What is the maximum number of points scored by each\nplayer in free games?\n\n>>> r.table(\'games\').filter(lambda game:\n... game[\'type\'] = \'free\'\n... ).group(\'player\').max(\'points\')[\'points\'].run(conn)\n\n>>> {\n... "Alice": 7,\n... "Bob": 10\n... }\n\n*Example:* What is each player\'s highest even and odd score?\n\n>>> r.table(\'games\')\n... .group(\'name\', lambda game:\n... game[\'points\'] % 2\n... ).max(\'points\')[\'points\'].run(conn)\n\n>>> {\n... ("Alice", 1): 7,\n... ("Bob", 0): 10,\n... ("Bob", 1): 15\n... }\n'
get_unbound_func(rethinkdb.ast.RqlQuery.max).__doc__ = u"Finds the maximum of a sequence. If called with a field name, finds\nthe element of that sequence with the largest value in that field. If\ncalled with a function, calls that function on every element of the\nsequence and returns the element which produced the largest value,\nignoring any elements where the function returns `None` or produces a\nnon-existence error.\n\nProduces a non-existence error when called on an empty sequence. You\ncan handle this case with `default`.\n\n*Example:* What's the maximum of 3, 5, and 7?\n\n>>> r.expr([3, 5, 7]).max().run(conn)\n\n*Example:* Which user has scored the most points?\n\n>>> r.table('users').max('points').run(conn)\n\n*Example:* Which user has scored the most points, counting bonus points?\n\n>>> r.table('users').max(lambda user:\n... user['points'] + user['bonus_points']\n... ).run(conn)\n\n*Example:* What is the largest number of points any user has ever scored?\n\n>>> r.table('users').max('points')['points'].run(conn)\n\n*Example:* Which user has scored the most points? (But return\n`None` instead of erroring if no users have ever scored points.)\n\n>>> r.table('users').max('points').default(None).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.min).__doc__ = u"Finds the minimum of a sequence. If called with a field name, finds\nthe element of that sequence with the smallest value in that field.\nIf called with a function, calls that function on every element of the\nsequence and returns the element which produced the smallest value,\nignoring any elements where the function returns `None` or produces a\nnon-existence error.\n\nProduces a non-existence error when called on an empty sequence. You\ncan handle this case with `default`.\n\n*Example:* What's the minimum of 3, 5, and 7?\n\n>>> r.expr([3, 5, 7]).min().run(conn)\n\n*Example:* Which user has scored the fewest points?\n\n>>> r.table('users').min('points').run(conn)\n\n*Example:* Which user has scored the fewest points, counting bonus points?\n\n>>> r.table('users').min(lambda user:\n... user['points'] + user['bonus_points']\n... ).run(conn)\n\n*Example:* What is the smallest number of points any user has ever scored?\n\n>>> r.table('users').min('points')['points'].run(conn)\n\n*Example:* Which user has scored the fewest points? (But return\n`None` instead of erroring if no users have ever scored points.)\n\n>>> r.table('users').min('points').default(None).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.reduce).__doc__ = u'Produce a single value from a sequence through repeated application of a reduction\nfunction. \nThe reduction function can be called on:\n\n- two elements of the sequence\n- one element of the sequence and one result of a previous reduction\n- two results of previous reductions\n\nThe reduction function can be called on the results of two previous reductions because the\n`reduce` command is distributed and parallelized across shards and CPU cores. A common\nmistaken when using the `reduce` command is to suppose that the reduction is executed\nfrom left to right. Read the [map-reduce in RethinkDB](/docs/map-reduce/) article to\nsee an example.\n\nIf the sequence is empty, the server will produce a `RqlRuntimeError` that can be\ncaught with `default`. \nIf the sequence has only one element, the first element will be returned.\n\n*Example:* Return the number of documents in the table `posts`.\n\n>>> r.table("posts").map(lambda doc: 1)\n... .reduce(lambda left, right: left+right)\n... .default(0).run(conn)\n\nA shorter way to execute this query is to use [count](/api/python/count).\n\n*Example:* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the number of comments for all posts.\n\n>>> r.table("posts").map(lambda doc:\n... doc["comments"].count()\n... ).reduce(lambda left, right:\n... left+right\n... ).default(0).run(conn)\n\n*Example:* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the maximum number comments per post.\n\n>>> r.table("posts").map(lambda doc:\n... doc["comments"].count()\n... ).reduce(lambda left, right:\n... r.branch(\n... left > right,\n... left,\n... right\n... )\n... ).default(0).run(conn)\n\nA shorter way to execute this query is to use [max](/api/python/max).\n'
get_unbound_func(rethinkdb.ast.RqlQuery.sum).__doc__ = u"Sums all the elements of a sequence. If called with a field name,\nsums all the values of that field in the sequence, skipping elements\nof the sequence that lack that field. If called with a function,\ncalls that function on every element of the sequence and sums the\nresults, skipping elements of the sequence where that function returns\n`None` or a non-existence error.\n\nReturns `0` when called on an empty sequence.\n\n*Example:* What's 3 + 5 + 7?\n\n>>> r.expr([3, 5, 7]).sum().run(conn)\n\n*Example:* How many points have been scored across all games?\n\n>>> r.table('games').sum('points').run(conn)\n\n*Example:* How many points have been scored across all games,\ncounting bonus points?\n\n>>> r.table('games').sum(lambda game:\n... game['points'] + game['bonus_points']\n... ).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.ungroup).__doc__ = u'Takes a grouped stream or grouped data and turns it into an array of\nobjects representing the groups. Any commands chained after `ungroup`\nwill operate on this array, rather than operating on each group\nindividually. This is useful if you want to e.g. order the groups by\nthe value of their reduction.\n\nThe format of the array returned by `ungroup` is the same as the\ndefault native format of grouped data in the JavaScript driver and\ndata explorer.\n\n*Example:* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\nSuppose that the table `games` has the following data:\n\n>>> [\n... {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n... {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n... {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n... {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n... ]\n\nWe can use this query:\n\n>>> r.table(\'games\')\n... .group(\'player\').max(\'points\')[\'points\']\n... .ungroup().order_by(r.desc(\'reduction\')).run(conn)\n\nResult: \n\n>>> [\n... {\n... "group": "Bob",\n... "reduction": 15\n... },\n... {\n... "group": "Alice",\n... "reduction": 7\n... }\n... ]\n\n*Example:* Select one random player and all their games.\n\n>>> r.table(\'games\').group(\'player\').ungroup().sample(1).run(conn)\n\nResult:\n\n>>> [\n... {\n... "group": "Bob",\n... "reduction": [\n... {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n... {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n... ]\n... }\n... ]\n\nNote that if you didn\'t call `ungroup`, you would instead select one\nrandom game from each player:\n\n>>> r.table(\'games\').group(\'player\').sample(1).run(conn)\n\nResult:\n\n>>> {\n... "Alice": [\n... {"id": 5, "player": "Alice", "points": 7, "type": "free"}\n... ],\n... "Bob": [\n... {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n... ]\n... 
}\n\n*Example:* Types!\n\n>>> r.table(\'games\').group(\'player\').type_of().run(conn) # Returns "GROUPED_STREAM"\n... r.table(\'games\').group(\'player\').ungroup().type_of().run(conn) # Returns "ARRAY"\n... r.table(\'games\').group(\'player\').avg(\'points\').run(conn) # Returns "GROUPED_DATA"\n... r.table(\'games\').group(\'player\').avg(\'points\').ungroup().run(conn) #Returns "ARRAY"\n'
(rethinkdb.branch).__doc__ = u'If the `test` expression returns `False` or `None`, the `false_branch` will be evaluated.\nOtherwise, the `true_branch` will be evaluated.\n \nThe `branch` command is effectively an `if` renamed due to language constraints.\n\n*Example:* Return heroes and superheroes.\n\n>>> r.table(\'marvel\').map(\n... r.branch(\n... r.row[\'victories\'] > 100,\n... r.row[\'name\'] + \' is a superhero\',\n... r.row[\'name\'] + \' is a hero\'\n... )\n... ).run(conn)\n\nIf the documents in the table `marvel` are:\n\n>>> [{\n... "name": "Iron Man",\n... "victories": 214\n... },\n... {\n... "name": "Jubilee",\n... "victories": 9\n... }]\n\nThe results will be:\n\n>>> [\n... "Iron Man is a superhero",\n... "Jubilee is a hero"\n... ]\n'
get_unbound_func(rethinkdb.ast.RqlQuery.coerce_to).__doc__ = u"Converts a value of one type into another.\n\nYou can convert: a selection, sequence, or object into an ARRAY, an array of pairs into an OBJECT, and any DATUM into a STRING.\n\n*Example:* Convert a table to an array.\n\n>>> r.table('marvel').coerce_to('array').run(conn)\n\n*Example:* Convert an array of pairs into an object.\n\n>>> r.expr([['name', 'Ironman'], ['victories', 2000]]).coerce_to('object').run(conn)\n\n*Example:* Convert a number to a string.\n\n>>> r.expr(1).coerce_to('string').run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.default).__doc__ = u'Handle non-existence errors. Tries to evaluate and return its first argument. If an\nerror related to the absence of a value is thrown in the process, or if its first\nargument returns `None`, returns its second argument. (Alternatively, the second argument\nmay be a function which will be called with either the text of the non-existence error\nor `None`.)\n\n*Example:* Suppose we want to retrieve the titles and authors of the table `posts`.\nIn the case where the author field is missing or `None`, we want to retrieve the string\n`Anonymous`.\n\n>>> r.table("posts").map(lambda post:\n... {\n... "title": post["title"],\n... "author": post["author"].default("Anonymous")\n... }\n... ).run(conn)\n\nWe can rewrite the previous query with `r.branch` too.\n\n>>> r.table("posts").map(lambda post:\n... r.branch(\n... post.has_fields("author"),\n... {\n... "title": post["title"],\n... "author": post["author"]\n... },\n... {\n... "title": post["title"],\n... "author": "Anonymous" \n... }\n... )\n... ).run(conn)\n\n*Example:* The `default` command can be useful to filter documents too. Suppose\nwe want to retrieve all our users who are not grown-ups or whose age is unknown\n(i.e the field `age` is missing or equals `None`). We can do it with this query:\n\n>>> r.table("users").filter(lambda user:\n... (user["age"] < 18).default(True)\n... ).run(conn)\n\nOne more way to write the previous query is to set the age to be `-1` when the\nfield is missing.\n\n>>> r.table("users").filter(lambda user:\n... user["age"].default(-1) < 18\n... ).run(conn)\n\nOne last way to do the same query is to use `has_fields`.\n\n>>> r.table("users").filter(lambda user:\n... user.has_fields("age").not_() | (user["age"] < 18)\n... ).run(conn)\n\nThe body of every `filter` is wrapped in an implicit `.default(False)`. 
You can overwrite\nthe value `False` by passing an option in filter, so the previous query can also be\nwritten like this.\n\n>>> r.table("users").filter(\n... lambda user: (user["age"] < 18).default(True),\n... default=True\n... ).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.do).__doc__ = u"Evaluate an expression and pass its values as arguments to a function or to an expression.\n\nThe last argument to `do` (or, in some forms, the only argument) is an expression or an anonymous function which receives values from either the previous arguments or from prefixed commands chained before `do`. A common use, for example, would be to retrieve a document with `get` and pass it to a function via `do`. The type of `do`'s result is the type of the value returned from the function or last expression.\n\n*Example:* Compute a golfer's net score for a game.\n\n>>> r.table('players').get('86be93eb-a112-48f5-a829-15b2cb49de1d').do(\n... lambda player: player['gross_score'] - player['course_handicap']\n... ).run(conn)\n\n*Example:* Return the name of the best scoring player in a two-player golf match.\n\n>>> r.do(r.table('players').get(id1), r.table('players').get(id2),\n... (lambda player1, player2:\n... r.branch(player1['gross_score'].lt(player2['gross_score']),\n... player1, player2))\n... ).run(conn)\n... \n\nNote that `branch`, the ReQL conditional command, must be used instead of `if`. See the `branch` [documentation](/api/python/branch) for more.\n"
(rethinkdb.error).__doc__ = u"Throw a runtime error. If called with no arguments inside the second argument to `default`, re-throw the current error.\n\n*Example:* Iron Man can't possibly have lost a battle:\n\n>>> r.table('marvel').get('IronMan').do(\n... lambda ironman: r.branch(ironman['victories'] < ironman['battles'],\n... r.error('impossible code path'),\n... ironman)\n... ).run(conn)\n\n"
(rethinkdb.expr).__doc__ = u"Construct a ReQL JSON object from a native object.\n\n*Example:* Objects wrapped with expr can then be manipulated by ReQL API functions.\n\n>>> r.expr({'a':'b'}).merge({'b':[1,2,3]}).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.for_each).__doc__ = u"Loop over a sequence, evaluating the given write query for each element.\n\n*Example:* Now that our heroes have defeated their villains, we can safely remove them from the villain table.\n\n>>> r.table('marvel').for_each(\n... lambda hero: r.table('villains').get(hero['villainDefeated']).delete()\n... ).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.info).__doc__ = u"Get information about a ReQL value.\n\n*Example:* Get information about a table such as primary key, or cache size.\n\n>>> r.table('marvel').info().run(conn)\n\n"
(rethinkdb.js).__doc__ = u'Create a javascript expression.\n\n*Example:* Concatenate two strings using Javascript\'\n\n`timeout` is the number of seconds before `r.js` times out. The default value is 5 seconds.\n\n>>> r.js("\'str1\' + \'str2\'").run(conn)\n\n*Example:* Select all documents where the \'magazines\' field is greater than 5 by running Javascript on the server.\n\n>>> r.table(\'marvel\').filter(\n... r.js(\'(function (row) { return row.magazines > 5; })\')\n... ).run(conn)\n\n*Example:* You may also specify a timeout in seconds (defaults to 5).\n\n>>> r.js(\'while(true) {}\', timeout=1.3).run(conn)\n\n'
(rethinkdb.json).__doc__ = u'Parse a JSON string on the server.\n\n*Example:* Send an array to the server\'\n\n>>> r.json("[1,2,3]").run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.type_of).__doc__ = u'Gets the type of a value.\n\n*Example:* Get the type of a string.\n\n>>> r.expr("foo").type_of().run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.date).__doc__ = u'Return a new time object only based on the day, month and year (ie. the same day at 00:00).\n\n*Example:* Retrieve all the users whose birthday is today\n\n>>> r.table("users").filter(lambda user:\n... user["birthdate"].date() == r.now().date()\n... ).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.day).__doc__ = u'Return the day of a time object as a number between 1 and 31.\n\n*Example:* Return the users born on the 24th of any month.\n\n>>> r.table("users").filter(\n... r.row["birthdate"].day() == 24\n... )\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.day_of_week).__doc__ = u'Return the day of week of a time object as a number between 1 and 7 (following ISO 8601 standard). For your convenience, the terms r.monday, r.tuesday etc. are defined and map to the appropriate integer.\n\n*Example:* Return today\'s day of week.\n\n>>> r.now().day_of_week().run(conn)\n\n*Example:* Retrieve all the users who were born on a Tuesday.\n\n>>> r.table("users").filter{ |user|\n... user["birthdate"].day_of_week().eq(r.tuesday)\n... }\n\n'
# Docstrings for the remaining ReQL date/time commands. Module-level commands
# (epoch_time, iso8601, now, time) get their docstring set directly on the
# rethinkdb attribute; per-query commands go through get_unbound_func on the
# RqlQuery AST method.
get_unbound_func(rethinkdb.ast.RqlQuery.day_of_year).__doc__ = u'Return the day of the year of a time object as a number between 1 and 366 (following ISO 8601 standard).\n\n*Example:* Retrieve all the users who were born the first day of a year.\n\n>>> r.table("users").filter(\n... r.row["birthdate"].day_of_year() == 1\n... ).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.during).__doc__ = u'Return if a time is between two other times (by default, inclusive for the start, exclusive for the end).\n\n*Example:* Retrieve all the posts that were posted between December 1st, 2013 (inclusive) and December 10th, 2013 (exclusive).\n\n>>> r.table("posts").filter(\n... r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"))\n... ).run(conn)\n\n*Example:* Retrieve all the posts that were posted between December 1st, 2013 (exclusive) and December 10th, 2013 (inclusive).\n\n>>> r.table("posts").filter(\n... r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"), left_bound="open", right_bound="closed")\n... ).run(conn)\n\n'
(rethinkdb.epoch_time).__doc__ = u'Create a time object based on seconds since epoch. The first argument is a double and\nwill be rounded to three decimal places (millisecond-precision).\n\n*Example:* Update the birthdate of the user "John" to November 3rd, 1986.\n\n>>> r.table("user").get("John").update({"birthdate": r.epoch_time(531360000)}).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.hours).__doc__ = u'Return the hour in a time object as a number between 0 and 23.\n\n*Example:* Return all the posts submitted after midnight and before 4am.\n\n>>> r.table("posts").filter(lambda post:\n... post["date"].hours() < 4\n... ).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.in_timezone).__doc__ = u"Return a new time object with a different timezone. While the time stays the same, the results returned by methods such as hours() will change since they take the timezone into account. The timezone argument has to be of the ISO 8601 format.\n\n*Example:* Hour of the day in San Francisco (UTC/GMT -8, without daylight saving time).\n\n>>> r.now().in_timezone('-08:00').hours().run(conn)\n"
(rethinkdb.iso8601).__doc__ = u'Create a time object based on an iso8601 date-time string (e.g.\n\'2013-01-01T01:01:01+00:00\'). We support all valid ISO 8601 formats except for week\ndates. If you pass an ISO 8601 date-time without a time zone, you must specify the time\nzone with the optarg `default_timezone`. Read more about the ISO 8601 format on the\nWikipedia page.\n\n*Example:* Update the time of John\'s birth.\n\n>>> r.table("user").get("John").update({"birth": r.iso8601(\'1986-11-03T08:30:00-07:00\')}).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.minutes).__doc__ = u'Return the minute in a time object as a number between 0 and 59.\n\n*Example:* Return all the posts submitted during the first 10 minutes of every hour.\n\n>>> r.table("posts").filter(lambda post:\n... post["date"].minutes() < 10\n... ).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.month).__doc__ = u'Return the month of a time object as a number between 1 and 12. For your convenience, the terms r.january, r.february etc. are defined and map to the appropriate integer.\n\n*Example:* Retrieve all the users who were born in November.\n\n>>> r.table("users").filter(\n... r.row["birthdate"].month() == 11\n... )\n\n*Example:* Retrieve all the users who were born in November.\n\n>>> r.table("users").filter(\n... r.row["birthdate"].month() == r.november\n... )\n\n'
(rethinkdb.now).__doc__ = u'Return a time object representing the current time in UTC. The command now() is computed once when the server receives the query, so multiple instances of r.now() will always return the same time inside a query.\n\n*Example:* Add a new user with the time at which he subscribed.\n\n>>> r.table("users").insert({\n... "name": "John",\n... "subscription_date": r.now()\n... }).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.seconds).__doc__ = u'Return the seconds in a time object as a number between 0 and 59.999 (double precision).\n\n*Example:* Return the post submitted during the first 30 seconds of every minute.\n\n>>> r.table("posts").filter(lambda post:\n... post["date"].seconds() < 30\n... ).run(conn)\n\n'
(rethinkdb.time).__doc__ = u'Create a time object for a specific time.\n\nA few restrictions exist on the arguments:\n\n- `year` is an integer between 1400 and 9,999.\n- `month` is an integer between 1 and 12.\n- `day` is an integer between 1 and 31.\n- `hour` is an integer.\n- `minutes` is an integer.\n- `seconds` is a double. Its value will be rounded to three decimal places\n(millisecond-precision).\n- `timezone` can be `\'Z\'` (for UTC) or a string with the format `\xb1[hh]:[mm]`.\n\n*Example:* Update the birthdate of the user "John" to November 3rd, 1986 UTC.\n\n>>> r.table("user").get("John").update({"birthdate": r.time(1986, 11, 3, \'Z\')}).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.time_of_day).__doc__ = u'Return the number of seconds elapsed since the beginning of the day stored in the time object.\n\n*Example:* Retrieve posts that were submitted before noon.\n\n>>> r.table("posts").filter(\n... r.row["date"].time_of_day() <= 12*60*60\n... ).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.timezone).__doc__ = u'Return the timezone of the time object.\n\n*Example:* Return all the users in the "-07:00" timezone.\n\n>>> r.table("users").filter(lambda user:\n... user["subscriptionDate"].timezone() == "-07:00"\n... )\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.to_epoch_time).__doc__ = u'Convert a time object to its epoch time.\n\n*Example:* Return the current time in seconds since the Unix Epoch with millisecond-precision.\n\n>>> r.now().to_epoch_time()\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.to_iso8601).__doc__ = u'Convert a time object to its iso 8601 format.\n\n*Example:* Return the current time in an ISO8601 format.\n\n>>> r.now().to_iso8601()\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.year).__doc__ = u'Return the year of a time object.\n\n*Example:* Retrieve all the users born in 1986.\n\n>>> r.table("users").filter(lambda user:\n... user["birthdate"].year() == 1986\n... ).run(conn)\n\n'
# Docstrings for ReQL array/object (document manipulation) commands attached
# to the RqlQuery AST methods, including the __getitem__ operator overload.
get_unbound_func(rethinkdb.ast.RqlQuery.append).__doc__ = u"Append a value to an array.\n\n*Example:* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n>>> r.table('marvel').get('IronMan')['equipment'].append('newBoots').run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.change_at).__doc__ = u'Change a value in an array at a given index. Returns the modified array.\n\n*Example:* Bruce Banner hulks out.\n\n>>> r.expr(["Iron Man", "Bruce", "Spider-Man"]).change_at(1, "Hulk").run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.delete_at).__doc__ = u'Remove an element from an array at a given index. Returns the modified array.\n\n*Example:* Hulk decides to leave the avengers.\n\n>>> r.expr(["Iron Man", "Hulk", "Spider-Man"]).delete_at(1).run(conn)\n\n*Example:* Hulk and Thor decide to leave the avengers.\n\n>>> r.expr(["Iron Man", "Hulk", "Thor", "Spider-Man"]).delete_at(1,3).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.difference).__doc__ = u"Remove the elements of one array from another array.\n\n*Example:* Retrieve Iron Man's equipment list without boots.\n\n>>> r.table('marvel').get('IronMan')['equipment'].difference(['Boots']).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.__getitem__).__doc__ = u"Get a single field from an object. If called on a sequence, gets that field from every\nobject in the sequence, skipping objects that lack it.\n\n*Example:* What was Iron Man's first appearance in a comic?\n\n>>> r.table('marvel').get('IronMan')['firstAppearance'].run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.has_fields).__doc__ = u'Test if an object has one or more fields. An object has a field if it has that key and the key has a non-null value. For instance, the object `{\'a\': 1,\'b\': 2,\'c\': null}` has the fields `a` and `b`.\n\nWhen applied to a single object, `has_fields` returns `true` if the object has the fields and `false` if it does not. When applied to a sequence, it will return a new sequence (an array or stream) containing the elements that have the specified fields.\n\n*Example:* Return the players who have won games.\n\n>>> r.table(\'players\').has_fields(\'games_won\').run(conn)\n\n*Example:* Test if a specific player has won any games.\n\n>>> r.table(\'players\').get(\n... \'b5ec9714-837e-400c-aa74-dbd35c9a7c4c\').has_fields(\'games_won\').run(conn)\n\n**Nested Fields**\n\n`has_fields` lets you test for nested fields in objects. If the value of a field is itself a set of key/value pairs, you can test for the presence of specific keys.\n\n*Example:* In the `players` table, the `games_won` field contains one or more fields for kinds of games won:\n\n>>> {\n... \'games_won\': {\n... \'playoffs\': 2,\n... \'championships\': 1\n... }\n... }\n\nReturn players who have the "championships" field.\n\n>>> r.table(\'players\').has_fields({\'games_won\': {\'championships\': true}}).run(conn)\n\nNote that `true` in the example above is testing for the existence of `championships` as a field, not testing to see if the value of the `championships` field is set to `true`. There\'s a more convenient shorthand form available. (See [pluck](/api/python/pluck) for more details on this.)\n\n>>> r.table(\'players\').has_fields({\'games_won\': \'championships\'}).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.insert_at).__doc__ = u'Insert a value in to an array at a given index. Returns the modified array.\n\n*Example:* Hulk decides to join the avengers.\n\n>>> r.expr(["Iron Man", "Spider-Man"]).insert_at(1, "Hulk").run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.keys).__doc__ = u"Return an array containing all of the object's keys.\n\n*Example:* Get all the keys of a row.\n\n>>> r.table('marvel').get('ironman').keys().run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.merge).__doc__ = u"Merge two objects together to construct a new object with properties from both. Gives preference to attributes from other when there is a conflict.\n\n*Example:* Equip IronMan for battle.\n\n>>> r.table('marvel').get('IronMan').merge(\n... r.table('loadouts').get('alienInvasionKit')\n... ).run(conn)\n\n*Example:* Merge can be used recursively to modify object within objects.\n\n>>> r.expr({'weapons' : {'spectacular graviton beam' : {'dmg' : 10, 'cooldown' : 20}}}).merge(\n... {'weapons' : {'spectacular graviton beam' : {'dmg' : 10}}}\n... ).run(conn)\n\n*Example:* To replace a nested object with another object you can use the literal keyword.\n\n>>> r.expr({'weapons' : {'spectacular graviton beam' : {'dmg' : 10, 'cooldown' : 20}}}).merge(\n... {'weapons' : r.literal({'repulsor rays' : {'dmg' : 3, 'cooldown' : 0}})}\n... ).run(conn)\n\n*Example:* Literal can be used to remove keys from an object as well.\n\n>>> r.expr({'weapons' : {'spectacular graviton beam' : {'dmg' : 10, 'cooldown' : 20}}}).merge(\n... {'weapons' : {'spectacular graviton beam' : r.literal()}}\n... ).run(conn)\n\n"
# Docstring for the module-level r.object command. The original example prompt
# was garbled as ">>> > r.object(...)" — the stray "> " is removed so the
# doctest-style prompt matches every other example in this file.
(rethinkdb.object).__doc__ = u'Creates an object from a list of key-value pairs, where the keys must\nbe strings. `r.object(A, B, C, D)` is equivalent to\n`r.expr([[A, B], [C, D]]).coerce_to(\'OBJECT\')`.\n\n*Example:* Create a simple object.\n\n>>> r.object(\'id\', 5, \'data\', [\'foo\', \'bar\']).run(conn)\n... {\'data\': ["foo", "bar"], \'id\': 5}\n'
# Docstrings for ReQL projection and set-operation commands (pluck/without,
# prepend, set_*), plus the module-level r.row document reference.
get_unbound_func(rethinkdb.ast.RqlQuery.pluck).__doc__ = u"Plucks out one or more attributes from either an object or a sequence of objects\n(projection).\n\n*Example:* We just need information about IronMan's reactor and not the rest of the\ndocument.\n\n>>> r.table('marvel').get('IronMan').pluck('reactorState', 'reactorPower').run(conn)\n\n*Example:* For the hero beauty contest we only care about certain qualities.\n\n>>> r.table('marvel').pluck('beauty', 'muscleTone', 'charm').run(conn)\n\n*Example:* Pluck can also be used on nested objects.\n\n>>> r.table('marvel').pluck({'abilities' : {'damage' : True, 'mana_cost' : True}, 'weapons' : True}).run(conn)\n\n*Example:* The nested syntax can quickly become overly verbose so there's a shorthand\nfor it.\n\n>>> r.table('marvel').pluck({'abilities' : ['damage', 'mana_cost']}, 'weapons').run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.prepend).__doc__ = u"Prepend a value to an array.\n\n*Example:* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n>>> r.table('marvel').get('IronMan')['equipment'].prepend('newBoots').run(conn)\n"
(rethinkdb.row).__doc__ = u"Returns the currently visited document.\n\n*Example:* Get all users whose age is greater than 5.\n\n>>> r.table('users').filter(r.row['age'] > 5).run(conn)\n\n*Example:* Accessing the attribute 'child' of an embedded document.\n\n>>> r.table('users').filter(r.row['embedded_doc']['child'] > 5).run(conn)\n\n*Example:* Add 1 to every element of an array.\n\n>>> r.expr([1, 2, 3]).map(r.row + 1).run(conn)\n\n*Example:* For nested queries functions should be used instead of r.row.\n\n>>> r.table('users').filter(\n... lambda doc: doc['name'] == r.table('prizes').get('winner')\n... ).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.set_difference).__doc__ = u"Remove the elements of one array from another and return them as a set (an array with\ndistinct values).\n\n*Example:* Check which pieces of equipment Iron Man has, excluding a fixed list.\n\n>>> r.table('marvel').get('IronMan')['equipment'].set_difference(['newBoots', 'arc_reactor']).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.set_insert).__doc__ = u"Add a value to an array and return it as a set (an array with distinct values).\n\n*Example:* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n>>> r.table('marvel').get('IronMan')['equipment'].set_insert('newBoots').run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.set_intersection).__doc__ = u"Intersect two arrays returning values that occur in both of them as a set (an array with\ndistinct values).\n\n*Example:* Check which pieces of equipment Iron Man has from a fixed list.\n\n>>> r.table('marvel').get('IronMan')['equipment'].set_intersection(['newBoots', 'arc_reactor']).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.set_union).__doc__ = u"Add a several values to an array and return it as a set (an array with distinct values).\n\n*Example:* Retrieve Iron Man's equipment list with the addition of some new boots and an arc reactor.\n\n>>> r.table('marvel').get('IronMan')['equipment'].set_union(['newBoots', 'arc_reactor']).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.splice_at).__doc__ = u'Insert several values in to an array at a given index. Returns the modified array.\n\n*Example:* Hulk and Thor decide to join the avengers.\n\n>>> r.expr(["Iron Man", "Spider-Man"]).splice_at(1, ["Hulk", "Thor"]).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.without).__doc__ = u"The opposite of pluck; takes an object or a sequence of objects, and returns them with\nthe specified paths removed.\n\n*Example:* Since we don't need it for this computation we'll save bandwidth and leave\nout the list of IronMan's romantic conquests.\n\n>>> r.table('marvel').get('IronMan').without('personalVictoriesList').run(conn)\n\n*Example:* Without their prized weapons, our enemies will quickly be vanquished.\n\n>>> r.table('enemies').without('weapons').run(conn)\n\n*Example:* Nested objects can be used to remove the damage subfield from the weapons and abilities fields.\n\n>>> r.table('marvel').without({'weapons' : {'damage' : True}, 'abilities' : {'damage' : True}}).run(conn)\n\n*Example:* The nested syntax can quickly become overly verbose so there's a shorthand for it.\n\n>>> r.table('marvel').without({'weapons' : 'damage', 'abilities' : 'damage'}).run(conn)\n\n"
# Docstrings for the ReQL join commands (eq_join, inner_join, outer_join, zip).
get_unbound_func(rethinkdb.ast.RqlQuery.eq_join).__doc__ = u"An efficient join that looks up elements in the right table by primary key.\n\n*Example:* Let our heroes join forces to battle evil!\n\n>>> r.table('marvel').eq_join('main_dc_collaborator', r.table('dc')).run(conn)\n\n*Example:* The above query is equivalent to this inner join but runs in O(n log(m))\ntime rather than the O(n * m) time the inner join takes.\n\n>>> r.table('marvel').inner_join(r.table('dc'),\n... lambda left, right: left['main_dc_collaborator'] == right['hero_name']).run(conn)\n\n*Example:* You can take advantage of a secondary index on the second table by giving\nan optional index parameter.\n\n>>> r.table('marvel').eq_join('main_weapon_origin',\n... r.table('mythical_weapons'), index='origin').run(conn)\n\n*Example:* You can pass a function instead of an attribute to join on more\ncomplicated expressions. Here we join to the DC universe collaborator with whom the hero\nhas the most appearances.\n\n>>> r.table('marvel').eq_join(lambda doc:\n... doc['dc_collaborators'].order_by('appearances')[0]['name'],\n... r.table('dc')).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.inner_join).__doc__ = u"Returns the inner product of two sequences (e.g. a table, a filter result) filtered by\nthe predicate. The query compares each row of the left sequence with each row of the\nright sequence to find all pairs of rows which satisfy the predicate. When the predicate\nis satisfied, each matched pair of rows of both sequences are combined into a result row.\n\n*Example:* Construct a sequence of documents containing all cross-universe matchups where a marvel hero would lose.\n\n>>> r.table('marvel').inner_join(r.table('dc'), lambda marvelRow, dcRow:\n... marvelRow['strength'] < dcRow['strength']).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.outer_join).__doc__ = u"Computes a left outer join by retaining each row in the left table even if no match was\nfound in the right table.\n\n*Example:* Construct a sequence of documents containing all cross-universe matchups\nwhere a marvel hero would lose, but keep marvel heroes who would never lose a matchup in\nthe sequence.\n\n>>> r.table('marvel').outer_join(r.table('dc'),\n... lambda marvelRow, dcRow: marvelRow['strength'] < dcRow['strength']).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.zip).__doc__ = u"Used to 'zip' up the result of a join by merging the 'right' fields into 'left' fields of each member of the sequence.\n\n*Example:* 'zips up' the sequence by merging the left and right fields produced by a join.\n\n>>> r.table('marvel').eq_join('main_dc_collaborator', r.table('dc')).zip().run(conn)\n"
# Docstrings for database/table/index administration commands. db_* commands
# are module-level; index_* are methods on the Table AST node and table_create
# is a method on the DB AST node.
(rethinkdb.db_create).__doc__ = u'Create a database. A RethinkDB database is a collection of tables, similar to\nrelational databases.\n\nIf successful, the operation returns an object: `{"created": 1}`. If a database with the\nsame name already exists the operation throws `RqlRuntimeError`.\n\nNote: that you can only use alphanumeric characters and underscores for the database name.\n\n*Example:* Create a database named \'superheroes\'.\n\n>>> r.db_create(\'superheroes\').run(conn)\n\n'
(rethinkdb.db_drop).__doc__ = u'Drop a database. The database, all its tables, and corresponding data will be deleted.\n\nIf successful, the operation returns the object `{"dropped": 1}`. If the specified database\ndoesn\'t exist a `RqlRuntimeError` is thrown.\n\n*Example:* Drop a database named \'superheroes\'.\n\n>>> r.db_drop(\'superheroes\').run(conn)\n\n'
(rethinkdb.db_list).__doc__ = u'List all database names in the system. The result is a list of strings.\n\n*Example:* List all databases.\n\n>>> r.db_list().run(conn)\n\n'
get_unbound_func(rethinkdb.ast.Table.index_create).__doc__ = u"Create a new secondary index on this table.\n\n*Example:* To efficiently query our heroes by code name we have to create a secondary\nindex.\n\n>>> r.table('dc').index_create('code_name').run(conn)\n\n*Example:* You can also create a secondary index based on an arbitrary function on the document.\n\n>>> r.table('dc').index_create('power_rating',\n... lambda hero: hero['combat_power'] + (2 * hero['compassion_power'])\n... ).run(conn)\n\n*Example:* A compound index can be created by returning an array of values to use as\nthe secondary index key.\n\n>>> r.table('dc').index_create('parental_planets',\n... lambda hero: [hero['mothers_home_planet'], hero['fathers_home_planet']]\n... ).run(conn)\n\n*Example:* A multi index can be created by passing an optional multi argument. Multi\nindex functions should return arrays and allow you to query based on whether a value\nis present in the returned array. The example would allow us to get heroes who possess a\nspecific ability (the field 'abilities' is an array).\n\n>>> r.table('dc').index_create('abilities', multi=True).run(conn)\n\n*Example:* The above can be combined to create a multi index on a function that\nreturns an array of values.\n\n>>> r.table('dc').index_create('parental_planets',\n... lambda hero: [hero['mothers_home_planet'], hero['fathers_home_planet']],\n... multi=True).run(conn)\n"
get_unbound_func(rethinkdb.ast.Table.index_drop).__doc__ = u"Delete a previously created secondary index of this table.\n\n*Example:* Drop a secondary index named 'code_name'.\n\n>>> r.table('dc').index_drop('code_name').run(conn)\n\n"
get_unbound_func(rethinkdb.ast.Table.index_list).__doc__ = u"List all the secondary indexes of this table.\n\n*Example:* List the available secondary indexes for this table.\n\n>>> r.table('marvel').index_list().run(conn)\n"
get_unbound_func(rethinkdb.ast.Table.index_status).__doc__ = u'Get the status of the specified indexes on this table, or the status\nof all indexes on this table if no indexes are specified.\n\nThe result is an array where for each index, there will be an object like this one:\n\n>>> {\n... "index": <index_name>,\n... "ready": True\n... }\n\nor this one:\n\n>>> {\n... "index": <index_name>,\n... "ready": False,\n... "blocks_processed": <int>,\n... "blocks_total": <int>\n... }\n\n*Example:* Get the status of all the indexes on `test`:\n\n>>> r.table(\'test\').index_status().run(conn)\n\n*Example:* Get the status of the `timestamp` index:\n\n>>> r.table(\'test\').index_status(\'timestamp\').run(conn)\n'
get_unbound_func(rethinkdb.ast.Table.index_wait).__doc__ = u"Wait for the specified indexes on this table to be ready, or for all\nindexes on this table to be ready if no indexes are specified.\n\nThe result is an array where for each index, there will be an object like:\n\n>>> {\n... index: <index_name>,\n... ready: True\n... }\n\n*Example:* Wait for all indexes on the table `test` to be ready:\n\n>>> r.table('test').index_wait().run(conn)\n\n*Example:* Wait for the index `timestamp` to be ready:\n\n>>> r.table('test').index_wait('timestamp').run(conn)\n"
get_unbound_func(rethinkdb.ast.DB.table_create).__doc__ = u"Create a table. A RethinkDB table is a collection of JSON documents.\n\nIf successful, the operation returns an object: `{created: 1}`. If a table with the same\nname already exists, the operation throws `RqlRuntimeError`.\n\nNote: that you can only use alphanumeric characters and underscores for the table name.\n\nWhen creating a table you can specify the following options:\n\n- `primary_key`: the name of the primary key. The default primary key is id;\n- `durability`: if set to `'soft'`, this enables _soft durability_ on this table:\nwrites will be acknowledged by the server immediately and flushed to disk in the\nbackground. Default is `'hard'` (acknowledgement of writes happens after data has been\nwritten to disk);\n- `datacenter`: the name of the datacenter this table should be assigned to.\n\n*Example:* Create a table named 'dc_universe' with the default settings.\n\n>>> r.db('test').table_create('dc_universe').run(conn)\n\n*Example:* Create a table named 'dc_universe' using the field 'name' as primary key.\n\n>>> r.db('test').table_create('dc_universe', primary_key='name').run(conn)\n\n*Example:* Create a table to log the very fast actions of the heroes.\n\n>>> r.db('test').table_create('hero_actions', durability='soft').run(conn)\n\n"
# Docstring for DB.table_drop. Fixes the spelling "succesful" -> "successful"
# in the docstring text; no other change.
get_unbound_func(rethinkdb.ast.DB.table_drop).__doc__ = u'Drop a table. The table and all its data will be deleted.\n\nIf successful, the operation returns an object: {"dropped": 1}. If the specified table\ndoesn\'t exist a `RqlRuntimeError` is thrown.\n\n*Example:* Drop a table named \'dc_universe\'.\n\n>>> r.db(\'test\').table_drop(\'dc_universe\').run(conn)\n\n'
# Docstring for DB.table_list.
get_unbound_func(rethinkdb.ast.DB.table_list).__doc__ = u"List all table names in a database. The result is a list of strings.\n\n*Example:* List all tables of the 'test' database.\n\n>>> r.db('test').table_list().run(conn)\n... \n"
# Docstrings for ReQL arithmetic/comparison commands. Each operator is
# documented twice: once on the RqlQuery dunder overload (__add__, __eq__, ...)
# and once on the corresponding named form (r.add, .eq, ...), with the same
# text, so help() shows it for both spellings.
get_unbound_func(rethinkdb.ast.RqlQuery.__add__).__doc__ = u'Sum two numbers, concatenate two strings, or concatenate 2 arrays.\n\n*Example:* It\'s as easy as 2 + 2 = 4.\n\n>>> (r.expr(2) + 2).run(conn)\n\n*Example:* Strings can be concatenated too.\n\n>>> (r.expr("foo") + "bar").run(conn)\n\n*Example:* Arrays can be concatenated too.\n\n>>> (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n\n*Example:* Create a date one year from now.\n\n>>> r.now() + 365*24*60*60\n\n'
(rethinkdb.add).__doc__ = u'Sum two numbers, concatenate two strings, or concatenate 2 arrays.\n\n*Example:* It\'s as easy as 2 + 2 = 4.\n\n>>> (r.expr(2) + 2).run(conn)\n\n*Example:* Strings can be concatenated too.\n\n>>> (r.expr("foo") + "bar").run(conn)\n\n*Example:* Arrays can be concatenated too.\n\n>>> (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n\n*Example:* Create a date one year from now.\n\n>>> r.now() + 365*24*60*60\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__and__).__doc__ = u'Compute the logical and of two values.\n\n*Example:* True and false anded is false?\n\n>>> (r.expr(True) & False).run(conn)\n... r.expr(True).and_(False).run(conn)\n... r.and_(True, False).run(conn)\n'
(rethinkdb.and_).__doc__ = u'Compute the logical and of two values.\n\n*Example:* True and false anded is false?\n\n>>> (r.expr(True) & False).run(conn)\n... r.expr(True).and_(False).run(conn)\n... r.and_(True, False).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__div__).__doc__ = u"Divide two numbers.\n\n*Example:* It's as easy as 2 / 2 = 1.\n\n>>> (r.expr(2) / 2).run(conn)\n"
(rethinkdb.div).__doc__ = u"Divide two numbers.\n\n*Example:* It's as easy as 2 / 2 = 1.\n\n>>> (r.expr(2) / 2).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.__eq__).__doc__ = u'Test if two values are equal.\n\n*Example:* Does 2 equal 2?\n\n>>> (r.expr(2) == 2).run(conn)\n... r.expr(2).eq(2).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.eq).__doc__ = u'Test if two values are equal.\n\n*Example:* Does 2 equal 2?\n\n>>> (r.expr(2) == 2).run(conn)\n... r.expr(2).eq(2).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__ge__).__doc__ = u'Test if the first value is greater than or equal to other.\n\n*Example:* Is 2 greater than or equal to 2?\n\n>>> (r.expr(2) >= 2).run(conn)\n... r.expr(2).ge(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.ge).__doc__ = u'Test if the first value is greater than or equal to other.\n\n*Example:* Is 2 greater than or equal to 2?\n\n>>> (r.expr(2) >= 2).run(conn)\n... r.expr(2).ge(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__gt__).__doc__ = u'Test if the first value is greater than other.\n\n*Example:* Is 2 greater than 2?\n\n>>> (r.expr(2) > 2).run(conn)\n... r.expr(2).gt(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.gt).__doc__ = u'Test if the first value is greater than other.\n\n*Example:* Is 2 greater than 2?\n\n>>> (r.expr(2) > 2).run(conn)\n... r.expr(2).gt(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__le__).__doc__ = u'Test if the first value is less than or equal to other.\n\n*Example:* Is 2 less than or equal to 2?\n\n>>> (r.expr(2) <= 2).run(conn)\n... r.expr(2).le(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.le).__doc__ = u'Test if the first value is less than or equal to other.\n\n*Example:* Is 2 less than or equal to 2?\n\n>>> (r.expr(2) <= 2).run(conn)\n... r.expr(2).le(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__lt__).__doc__ = u'Test if the first value is less than other.\n\n*Example:* Is 2 less than 2?\n\n>>> (r.expr(2) < 2).run(conn)\n... r.expr(2).lt(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.lt).__doc__ = u'Test if the first value is less than other.\n\n*Example:* Is 2 less than 2?\n\n>>> (r.expr(2) < 2).run(conn)\n... r.expr(2).lt(2).run(conn)\n\n'
# Docstrings for the modulo command (__mod__ operator overload and the named
# r.mod form). Both original strings ended with a stray orphan backtick line
# ("\n\n`\n") — leftover markup removed; text otherwise unchanged.
get_unbound_func(rethinkdb.ast.RqlQuery.__mod__).__doc__ = u"Find the remainder when dividing two numbers.\n\n*Example:* It's as easy as 2 % 2 = 0.\n\n>>> (r.expr(2) % 2).run(conn)\n\n"
(rethinkdb.mod).__doc__ = u"Find the remainder when dividing two numbers.\n\n*Example:* It's as easy as 2 % 2 = 0.\n\n>>> (r.expr(2) % 2).run(conn)\n\n"
# Docstrings for multiplication, inequality, logical not/or, and subtraction.
# As elsewhere in this file, each command's text is duplicated across its
# dunder overload, its method form, and/or its module-level r.* form.
get_unbound_func(rethinkdb.ast.RqlQuery.__mul__).__doc__ = u'Multiply two numbers, or make a periodic array.\n\n*Example:* It\'s as easy as 2 * 2 = 4.\n\n>>> (r.expr(2) * 2).run(conn)\n\n*Example:* Arrays can be multiplied by numbers as well.\n\n>>> (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n'
(rethinkdb.mul).__doc__ = u'Multiply two numbers, or make a periodic array.\n\n*Example:* It\'s as easy as 2 * 2 = 4.\n\n>>> (r.expr(2) * 2).run(conn)\n\n*Example:* Arrays can be multiplied by numbers as well.\n\n>>> (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__ne__).__doc__ = u'Test if two values are not equal.\n\n*Example:* Does 2 not equal 2?\n\n>>> (r.expr(2) != 2).run(conn)\n... r.expr(2).ne(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.ne).__doc__ = u'Test if two values are not equal.\n\n*Example:* Does 2 not equal 2?\n\n>>> (r.expr(2) != 2).run(conn)\n... r.expr(2).ne(2).run(conn)\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__invert__).__doc__ = u'Compute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example:* Not true is false.\n\n>>> r.not_(True).run(conn)\n... r.expr(True).not_().run(conn)\n... (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example:* Return all the users that do not have a "flag" field.\n\n>>> r.table(\'users\').filter(\n... lambda users: (~users.has_fields(\'flag\'))\n... ).run(conn)\n\n*Example:* As above, but prefix-style.\n\n>>> r.table(\'users\').filter(\n... lambda users: r.not_(users.has_fields(\'flag\'))\n... ).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.not_).__doc__ = u'Compute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example:* Not true is false.\n\n>>> r.not_(True).run(conn)\n... r.expr(True).not_().run(conn)\n... (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example:* Return all the users that do not have a "flag" field.\n\n>>> r.table(\'users\').filter(\n... lambda users: (~users.has_fields(\'flag\'))\n... ).run(conn)\n\n*Example:* As above, but prefix-style.\n\n>>> r.table(\'users\').filter(\n... lambda users: r.not_(users.has_fields(\'flag\'))\n... ).run(conn)\n'
(rethinkdb.not_).__doc__ = u'Compute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example:* Not true is false.\n\n>>> r.not_(True).run(conn)\n... r.expr(True).not_().run(conn)\n... (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example:* Return all the users that do not have a "flag" field.\n\n>>> r.table(\'users\').filter(\n... lambda users: (~users.has_fields(\'flag\'))\n... ).run(conn)\n\n*Example:* As above, but prefix-style.\n\n>>> r.table(\'users\').filter(\n... lambda users: r.not_(users.has_fields(\'flag\'))\n... ).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__or__).__doc__ = u'Compute the logical or of two values.\n\n*Example:* True or false ored is true?\n\n>>> (r.expr(True) | False).run(conn)\n... r.expr(True).or_(False).run(conn)\n... r.or_(True, False).run(conn)\n'
(rethinkdb.or_).__doc__ = u'Compute the logical or of two values.\n\n*Example:* True or false ored is true?\n\n>>> (r.expr(True) | False).run(conn)\n... r.expr(True).or_(False).run(conn)\n... r.or_(True, False).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.__sub__).__doc__ = u"Subtract two numbers.\n\n*Example:* It's as easy as 2 - 2 = 0.\n\n>>> (r.expr(2) - 2).run(conn)\n\n*Example:* Create a date one year ago today.\n\n>>> r.now() - 365*24*60*60\n\n*Example:* Retrieve how many seconds elapsed between today and date\n\n>>> r.now() - date\n\n"
(rethinkdb.sub).__doc__ = u"Subtract two numbers.\n\n*Example:* It's as easy as 2 - 2 = 0.\n\n>>> (r.expr(2) - 2).run(conn)\n\n*Example:* Create a date one year ago today.\n\n>>> r.now() - 365*24*60*60\n\n*Example:* Retrieve how many seconds elapsed between today and date\n\n>>> r.now() - date\n\n"
get_unbound_func(rethinkdb.ast.Table.between).__doc__ = u'Get all documents between two keys. Accepts three optional arguments: `index`,\n`left_bound`, and `right_bound`. If `index` is set to the name of a secondary index,\n`between` will return all documents where that index\'s value is in the specified range\n(it uses the primary key by default). `left_bound` or `right_bound` may be set to `open`\nor `closed` to indicate whether or not to include that endpoint of the range (by default,\n`left_bound` is closed and `right_bound` is open).\n\n*Example:* Find all users with primary key >= 10 and < 20 (a normal half-open interval).\n\n>>> r.table(\'marvel\').between(10, 20).run(conn)\n\n*Example:* Find all users with primary key >= 10 and <= 20 (an interval closed on\nboth sides).\n\n>>> r.table(\'marvel\').between(10, 20, right_bound=\'closed\').run(conn)\n\n*Example:* Find all users with primary key < 20. (You can use `None` to mean\n"unbounded" for either endpoint.)\n\n>>> r.table(\'marvel\').between(None, 20, right_bound=\'closed\').run(conn)\n\n*Example:* Between can be used on secondary indexes too. Just pass an optional index\nargument giving the secondary index to query.\n\n>>> r.table(\'dc\').between(\'dark_knight\', \'man_of_steel\', index=\'code_name\').run(conn)\n\n'
(rethinkdb.db).__doc__ = u"Reference a database.\n\n*Example:* Before we can query a table we have to select the correct database.\n\n>>> r.db('heroes').table('marvel').run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.filter).__doc__ = u'Get all the documents for which the given predicate is true.\n\n`filter` can be called on a sequence, selection, or a field containing an array of\nelements. The return type is the same as the type on which the function was called on.\n\nThe body of every filter is wrapped in an implicit `.default(False)`, which means that\nif a non-existence errors is thrown (when you try to access a field that does not exist\nin a document), RethinkDB will just ignore the document.\nThe `default` value can be changed by passing the named argument `default`.\nSetting this optional argument to `r.error()` will cause any non-existence errors to\nreturn a `RqlRuntimeError`.\n\n*Example:* Get all the users that are 30 years old.\n\n>>> r.table(\'users\').filter({"age": 30}).run(conn)\n\nA more general way to write the previous query is to use `r.row`.\n\n>>> r.table(\'users\').filter(r.row["age"] == 30).run(conn)\n\nHere the predicate is `r.row["age"] == 30`.\n\n- `r.row` refers to the current document\n- `r.row["age"]` refers to the field `age` of the current document\n- `r.row["age"] == 30` returns `True` if the field `age` is 30\n\nAn even more general way to write the same query is to use a lambda function.\nRead the documentation about [r.row](../row/) to know more about the differences\nbetween `r.row` and lambda functions in ReQL.\n\n>>> r.table(\'users\').filter(lambda user:\n... user["age"] == 30\n... 
).run(conn)\n\n*Example:* Get all the users that are more than 18 years old.\n\n>>> r.table("users").filter(r.row["age"] > 18).run(conn)\n\n*Example:* Get all the users that are less than 18 years old and more than 13 years old.\n\n>>> r.table("users").filter((r.row["age"] < 18) & (r.row["age"] > 13)).run(conn)\n\n*Example:* Get all the users that are more than 18 years old or have their parental consent.\n\n>>> r.table("users").filter((r.row["age"].lt(18)) | (r.row["hasParentalConsent"])).run(conn)\n\n*Example:* Get all the users that are less than 18 years old or whose age is unknown\n(field `age` missing).\n\n>>> r.table("users").filter(r.row["age"] < 18, default=True).run(conn)\n\n*Example:* Get all the users that are more than 18 years old. Throw an error if a\ndocument is missing the field `age`.\n\n>>> r.table("users").filter(r.row["age"] > 18, default=r.error()).run(conn)\n\n*Example:* Select all users who have given their phone number (all the documents\nwhose field `phone_number` is defined and not `None`).\n\n>>> r.table(\'users\').filter(lambda user:\n... user.has_fields(\'phone_number\')\n... ).run(conn)\n\n*Example:* Retrieve all the users who subscribed between January 1st, 2012\n(included) and January 1st, 2013 (excluded).\n\n>>> r.table("users").filter(lambda user:\n... user["subscription_date"].during( r.time(2012, 1, 1, \'Z\'), r.time(2013, 1, 1, \'Z\') )\n... ).run(conn)\n\n*Example:* Retrieve all the users who have a gmail account (whose field `email` ends\nwith `@gmail.com`).\n\n>>> r.table("users").filter(lambda user:\n... user["email"].match("@gmail.com$")\n... ).run(conn)\n\n*Example:* Filter based on the presence of a value in an array.\n\nSuppose the table `users` has the following schema\n\n>>> {\n... "name": <type \'str\'>\n... "places_visited": [<type \'str\'>]\n... }\n\nRetrieve all the users whose field `places_visited` contains `France`.\n\n>>> r.table("users").filter(lambda user:\n... user["places_visited"].contains("France")\n... 
).run(conn)\n\n*Example:* Filter based on nested fields.\n\nSuppose we have a table `users` containing documents with the following schema.\n\n>>> {\n... "id": <type \'str\'>\n... "name": {\n... "first": <type \'str\'>,\n... "middle": <type \'str\'>,\n... "last": <type \'str\'>\n... }\n... }\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), with any middle name.\n\n>>> r.table("users").filter({\n... "name":{\n... "first": "William",\n... "last": "Adama"\n... }\n... }).run(conn)\n\nIf you want an exact match for a field that is an object, you will have to use `r.literal`.\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), and who do not have a middle name.\n\n>>> r.table("users").filter(r.literal({\n... "name":{\n... "first": "William",\n... "last": "Adama"\n... }\n... })).run(conn)\n\nThe equivalent queries with a lambda function.\n\n>>> r.table("users").filter(lambda user:\n... (user["name"]["first"] == "William")\n... & (user["name"]["last"] == "Adama")\n... ).run(conn)\n\n>>> r.table("users").filter(lambda user:\n... user["name"] == {\n... "first": "William",\n... "last": "Adama"\n... }\n... ).run(conn)\n'
get_unbound_func(rethinkdb.ast.Table.get).__doc__ = u"Get a document by primary key.\n\nIf no document exists with that primary key, `get` will return `None`.\n\n*Example:* Find a document by UUID.\n\n>>> r.table('posts').get('a9849eef-7176-4411-935b-79a6e3c56a74').run(conn)\n\n*Example:* Find a document and merge another document with it.\n\n>>> r.table('heroes').get(3).merge(\n... { 'powers': ['invisibility', 'speed'] }\n... ).run(conn)\n"
get_unbound_func(rethinkdb.ast.Table.get_all).__doc__ = u'Get all documents where the given value matches the value of the requested index.\n\n*Example:* Secondary index keys are not guaranteed to be unique so we cannot query via\n"get" when using a secondary index.\n\n>>> r.table(\'marvel\').get_all(\'man_of_steel\', index=\'code_name\').run(conn)\n\n*Example:* Without an index argument, we default to the primary index. While `get` will either return the document or `None` when no document with such a primary key value exists, this will return either a one or zero length stream.\n\n>>> r.table(\'dc\').get_all(\'superman\').run(conn)\n\n*Example:* You can get multiple documents in a single call to `get_all`.\n\n>>> r.table(\'dc\').get_all(\'superman\', \'ant man\').run(conn)\n\n'
get_unbound_func(rethinkdb.ast.DB.table).__doc__ = u"Select all documents in a table. This command can be chained with other commands to do\nfurther processing on the data.\n\n*Example:* Return all documents in the table 'marvel' of the default database.\n\n>>> r.table('marvel').run(conn)\n\n*Example:* Return all documents in the table 'marvel' of the database 'heroes'.\n\n>>> r.db('heroes').table('marvel').run(conn)\n\n*Example:* If you are OK with potentially out of date data from this table and want\npotentially faster reads, pass a flag allowing out of date data.\n\n>>> r.db('heroes').table('marvel', True).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.downcase).__doc__ = u'Downcases a string.\n\n*Example:*\n\n>>> > r.expr("Sentence about LaTeX.").downcase().run(conn)\n... "sentence about latex."\n'
get_unbound_func(rethinkdb.ast.RqlQuery.match).__doc__ = u'Matches against a regular expression. If there is a match, returns an object with the fields:\n\n- `str`: The matched string\n- `start`: The matched string\'s start\n- `end`: The matched string\'s end\n- `groups`: The capture groups defined with parentheses\n\nIf no match is found, returns `None`.\n\nAccepts RE2 syntax\n([https://code.google.com/p/re2/wiki/Syntax](https://code.google.com/p/re2/wiki/Syntax)).\nYou can enable case-insensitive matching by prefixing the regular expression with\n`(?i)`. See the linked RE2 documentation for more flags.\n\nThe `match` command does not support backreferences.\n\n*Example:* Get all users whose name starts with "A". Because `None` evaluates to `false` in\n`filter`, you can just use the result of `match` for the predicate.\n\n>>> r.table(\'users\').filter(lambda doc:\n... doc[\'name\'].match("^A")\n... ).run(conn)\n\n*Example:* Get all users whose name ends with "n".\n\n>>> r.table(\'users\').filter(lambda doc:\n... doc[\'name\'].match("n$")\n... ).run(conn)\n\n*Example:* Get all users whose name has "li" in it\n\n>>> r.table(\'users\').filter(lambda doc:\n... doc[\'name\'].match("li")\n... ).run(conn)\n\n*Example:* Get all users whose name is "John" with a case-insensitive search.\n\n>>> r.table(\'users\').filter(lambda doc:\n... doc[\'name\'].match("(?i)^john$")\n... ).run(conn)\n\n*Example:* Get all users whose name is composed of only characters between "a" and "z".\n\n>>> r.table(\'users\').filter(lambda doc:\n... doc[\'name\'].match("(?i)^[a-z]+$")\n... ).run(conn)\n\n*Example:* Get all users where the zipcode is a string of 5 digits.\n\n>>> r.table(\'users\').filter(lambda doc:\n... doc[\'zipcode\'].match("\\d{5}")\n... ).run(conn)\n\n*Example:* Retrieve the domain of a basic email\n\n>>> r.expr("name@domain.com").match(".*@(.*)").run(conn)\n\nResult:\n\n>>> {\n... "start": 0,\n... "end": 20,\n... "str": "name@domain.com",\n... "groups":[\n... {\n... 
"end": 17,\n... "start": 7,\n... "str": "domain.com"\n... }\n... ]\n... }\n\nYou can then retrieve only the domain with the [\\[\\]](/api/python/get_field) selector.\n\n>>> r.expr("name@domain.com").match(".*@(.*)")["groups"][0]["str"].run(conn)\n\nReturns `\'domain.com\'`\n\n*Example:* Fail to parse out the domain and returns `None`.\n\n>>> r.expr("name[at]domain.com").match(".*@(.*)").run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.split).__doc__ = u'Splits a string into substrings. Splits on whitespace when called\nwith no arguments. When called with a separator, splits on that\nseparator. When called with a separator and a maximum number of\nsplits, splits on that separator at most `max_splits` times. (Can be\ncalled with `None` as the separator if you want to split on whitespace\nwhile still specifying `max_splits`.)\n\nMimics the behavior of Python\'s `string.split` in edge cases, except\nfor splitting on the empty string, which instead produces an array of\nsingle-character strings.\n\n*Example:* Split on whitespace.\n\n>>> > r.expr("foo bar bax").split().run(conn)\n... ["foo", "bar", "bax"]\n\n*Example:* Split the entries in a CSV file.\n\n>>> > r.expr("12,37,,22,").split(",").run(conn)\n... ["12", "37", "", "22", ""]\n\n*Example:* Split a string into characters.\n\n>>> > r.expr("mlucy").split("").run(conn)\n... ["m", "l", "u", "c", "y"]\n\n*Example:* Split the entries in a CSV file, but only at most 3\ntimes.\n\n>>> > r.expr("12,37,,22,").split(",", 3).run(conn)\n... ["12", "37", "", "22,"]\n\n*Example:* Split on whitespace at most once (i.e. get the first word).\n\n>>> > r.expr("foo bar bax").split(None, 1).run(conn)\n... ["foo", "bar bax"]\n'
get_unbound_func(rethinkdb.ast.RqlQuery.upcase).__doc__ = u'Upcases a string.\n\n*Example:*\n\n>>> > r.expr("Sentence about LaTeX.").upcase().run(conn)\n... "SENTENCE ABOUT LATEX."\n'
get_unbound_func(rethinkdb.ast.RqlQuery.concat_map).__doc__ = u'Concatenate one or more elements into a single sequence using a mapping function.\n\n`concat_map` works in a similar fashion to `map`, applying the given function to each element in a sequence, but it will always return a single sequence. If the mapping function returns a sequence, `map` would produce a sequence of sequences:\n\n>>> r.expr([1, 2, 3]).map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n[[1, 2], [2, 4], [3, 6]]\n\nWhereas `concat_map` with the same mapping function would merge those sequences into one:\n\n>>> r.expr([1, 2, 3]).concat_map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n[1, 2, 2, 4, 3, 6]\n\nThe return value, array or stream, will be the same type as the input.\n\n*Example:* Construct a sequence of all monsters defeated by Marvel heroes. The field "defeatedMonsters" is an array of one or more monster names.\n\n>>> r.table(\'marvel\').concat_map(lambda hero: hero[\'defeatedMonsters\']).run(conn)\n\n*Example:* Simulate an [eq_join](/api/python/eq_join/) using `concat_map`. (This is how ReQL joins are implemented internally.)\n\n>>> r.table(\'posts\').concat_map(\n... lambda post: r.table(\'comments\').get_all(\n... post[\'id\'], index=\'post_id\'\n... ).map(\n... lambda comment: { \'left\': post, \'right\': comment}\n... )\n... ).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.indexes_of).__doc__ = u"Get the indexes of an element in a sequence. If the argument is a predicate, get the indexes of all elements matching it.\n\n*Example:* Find the position of the letter 'c'.\n\n>>> r.expr(['a','b','c']).indexes_of('c').run(conn)\n\n*Example:* Find the popularity ranking of invisible heroes.\n\n>>> r.table('marvel').union(r.table('dc')).order_by('popularity').indexes_of(\n... r.row['superpowers'].contains('invisibility')\n... ).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.is_empty).__doc__ = u"Test if a sequence is empty.\n\n*Example:* Are there any documents in the marvel table?\n\n>>> r.table('marvel').is_empty().run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.limit).__doc__ = u"End the sequence after the given number of elements.\n\n*Example:* Only so many can fit in our Pantheon of heroes.\n\n>>> r.table('marvel').order_by('belovedness').limit(10).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.map).__doc__ = u"Transform each element of the sequence by applying the given mapping function.\n\n*Example:* Construct a sequence of hero power ratings.\n\n>>> r.table('marvel').map(lambda hero:\n... hero['combatPower'] + hero['compassionPower'] * 2\n... ).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.nth).__doc__ = u'Get the nth element of a sequence.\n\n*Example:* Select the second element in the array.\n\n>>> r.expr([1,2,3])[1].run(conn)\n... r.expr([1,2,3]).nth(1).run(conn)\n'
get_unbound_func(rethinkdb.ast.RqlQuery.order_by).__doc__ = u'Sort the sequence by document values of the given key(s). To specify\nthe ordering, wrap the attribute with either `r.asc` or `r.desc`\n(defaults to ascending).\n\nSorting without an index requires the server to hold the sequence in\nmemory, and is limited to 100,000 documents. Sorting with an index can\nbe done on arbitrarily large tables, or after a `between` command\nusing the same index.\n\n*Example:* Order all the posts using the index `date`. \n\n>>> r.table(\'posts\').order_by(index=\'date\').run(conn)\n\nThe index must have been previously created with [index_create](/api/python/index_create/).\n\n>>> r.table(\'posts\').index_create(\'date\').run(conn)\n\nYou can also select a descending ordering:\n\n>>> r.table(\'posts\').order_by(index=r.desc(\'date\')).run(conn, callback)\n\n*Example:* If you have a sequence with less than 100,000 documents, you can order it\nwithout an index.\n\n>>> r.table(\'posts\').get(1)[\'comments\'].order_by(\'date\')\n\nYou can also select a descending ordering:\n\n>>> r.table(\'posts\').get(1)[\'comments\'].order_by(r.desc(\'date\'))\n\nIf you\'re doing ad-hoc analysis and know your table won\'t have more then 100,000\nelements you can run `order_by` without an index:\n\n>>> r.table(\'small_table\').order_by(\'date\')\n\n*Example:* You can efficiently order using multiple fields by using a\n[compound index](http://www.rethinkdb.com/docs/secondary-indexes/python/).\n\nOrder by date and title.\n\n>>> r.table(\'posts\').order_by(index=\'date_and_title\').run(conn)\n\nThe index must have been previously created with [index_create](/api/python/index_create/).\n\n>>> r.table(\'posts\').index_create(\'date_and_title\', lambda post:\n... [post["date"], post["title"]]).run(conn)\n\n_Note_: You cannot specify multiple orders in a compound index. 
See [issue #2306](https://github.com/rethinkdb/rethinkdb/issues/2306)\nto track progress.\n\n*Example:* If you have a sequence with less than 100,000 documents, you can order it\nby multiple fields without an index.\n\nr.table(\'small_table\').orderBy(\'date\', r.desc(\'title\'))\n\n*Example:* Notice that an index ordering always has highest\nprecedence. The following query orders posts by date, and if multiple\nposts were published on the same date, they will be ordered by title.\n\n>>> r.table(\'post\').order_by(\'title\', index=\'date\').run(conn)\n\n*Example:* You can efficiently order data on arbitrary expressions using indexes.\n\n>>> r.table(\'posts\').order_by(index=\'votes\').run(conn)\n\nThe index must have been previously created with [index_create](/api/ruby/index_create/).\n\n>>> r.table(\'posts\').index_create(\'votes\', lambda post:\n... post["upvotes"]-post["downvotes"]\n... ).run(conn)\n\n*Example:* If you have a sequence with less than 100,000 documents, you can order it with an arbitrary function directly.\n\n>>> r.table(\'small_table\').order_by(lambda doc:\n... doc[\'upvotes\']-doc[\'downvotes\']\n... );\n\nYou can also select a descending ordering:\n\n>>> r.table(\'small_table\').order_by(r.desc(lambda doc:\n... doc[\'upvotes\']-doc[\'downvotes\']\n... ));\n\n*Example:* Ordering after a `between` command can be done as long as the same index is being used.\n\n>>> r.table("posts").between(r.time(2013, 1, 1, \'+00:00\'), r.time(2013, 1, 1, \'+00:00\'), index=\'date\')\n... .order_by(index=\'date\').run(conn);\n\n'
get_unbound_func(rethinkdb.ast.RqlQuery.sample).__doc__ = u"Select a given number of elements from a sequence with uniform random distribution. Selection is done without replacement.\n\n*Example:* Select 3 random heroes.\n\n>>> r.table('marvel').sample(3).run(conn)\n"
get_unbound_func(rethinkdb.ast.RqlQuery.skip).__doc__ = u"Skip a number of elements from the head of the sequence.\n\n*Example:* Here in conjunction with `order_by` we choose to ignore the most successful heroes.\n\n>>> r.table('marvel').order_by('successMetric').skip(10).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.slice).__doc__ = u"Return the elements of a sequence within the specified range.\n\n`slice` returns the range between `start_index` and `end_index`. If only `start_index` is specified, `slice` returns the range from that index to the end of the sequence. Specify `left_bound` or `right_bound` as `open` or `closed` to indicate whether to include that endpoint of the range by default: `closed` returns that endpoint, while `open` does not. By default, `left_bound` is closed and `right_bound` is open, so the range `(10,13)` will return the tenth, eleventh and twelfth elements in the sequence.\n\nIf `end_index` is past the end of the sequence, all elements from `start_index` to the end of the sequence will be returned. If `start_index` is past the end of the sequence or `end_index` is less than `start_index`, a zero-element sequence will be returned (although see below for negative `end_index` values). An error will be raised on a negative `start_index`.\n\nA negative `end_index` is allowed with arrays; in that case, the returned range counts backward from the array's end. That is, the range of `(2,-1)` returns the second element through the next-to-last element of the range. A negative `end_index` is not allowed with a stream. (An `end_index` of −1 *is* allowed with a stream if `right_bound` is closed; this behaves as if no `end_index` was specified.)\n\nIf you are only specifying the indexes and not the bounding options, you may use Python's slice operator as a shorthand: `[start_index:end_index]`.\n\n**Example:** Return the fourth, fifth and sixth youngest players. 
(The youngest player is at index 0, so those are elements 3–5.)\n\n>>> r.table('players').order_by(index='age').slice(3,6).run(conn)\n\nOr, using Python's slice operator:\n\n>>> r.table('players').filter({'class': 'amateur'})[10:20].run(conn)\n\n**Example:** Return all but the top three players who have a red flag.\n\n>>> r.table('players').filter({'flag': 'red'}).order_by(index=r.desc('score')).slice(3).run(conn)\n\n**Example:** Return holders of tickets `X` through `Y`, assuming tickets are numbered sequentially. We want to include ticket `Y`.\n\n>>> r.table('users').order_by(index='ticket').slice(x, y, right_bound='closed').run(conn)\n\n**Example:** Return the elements of an array from the second through two from the end (that is, not including the last two).\n\n>>> r.expr([0,1,2,3,4,5]).slice(2,-2).run(conn)\n\nResult:\n\n>>> [2,3]\n"
get_unbound_func(rethinkdb.ast.RqlQuery.union).__doc__ = u"Concatenate two sequences.\n\n*Example:* Construct a stream of all heroes.\n\n>>> r.table('marvel').union(r.table('dc')).run(conn)\n\n"
get_unbound_func(rethinkdb.ast.RqlQuery.with_fields).__doc__ = u"Takes a sequence of objects and a list of fields. If any objects in the sequence don't\nhave all of the specified fields, they're dropped from the sequence. The remaining\nobjects have the specified fields plucked out. (This is identical to `has_fields`\nfollowed by `pluck` on a sequence.)\n\n*Example:* Get a list of heroes and their nemeses, excluding any heroes that lack one.\n\n>>> r.table('marvel').with_fields('id', 'nemesis')\n\n*Example:* Get a list of heroes and their nemeses, excluding any heroes whose nemesis isn't in an evil organization.\n\n>>> r.table('marvel').with_fields('id', {'nemesis' : {'evil_organization' : True}})\n\n*Example:* The nested syntax can quickly become overly verbose so there's a shorthand.\n\n>>> r.table('marvel').with_fields('id', {'nemesis' : 'evil_organization'})\n\n"
get_unbound_func(rethinkdb.ast.Table.delete).__doc__ = u'Delete one or more documents from a table.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_vals`: if set to `True` and in case of a single document deletion, the deleted\ndocument will be returned.\n\nDelete returns an object that contains the following attributes:\n\n- `deleted`: the number of documents that were deleted.\n- `skipped`: the number of documents that were skipped. \nFor example, if you attempt to delete a batch of documents, and another concurrent query\ndeletes some of those documents first, they will be counted as skipped.\n- `errors`: the number of errors encountered while performing the delete.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `inserted`, `replaced`, and `unchanged`: all 0 for a delete operation..\n- `old_val`: if `return_vals` is set to `True`, contains the deleted document.\n- `new_val`: if `return_vals` is set to `True`, contains `None`.\n\n*Example:* Delete a single document from the table `comments`.\n\n>>> r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete().run(conn)\n\n*Example:* Delete all documents from the table `comments`.\n\n>>> r.table("comments").delete().run(conn)\n\n*Example:* Delete all comments where the field `id_post` is `3`.\n\n>>> r.table("comments").filter({"id_post": 3}).delete().run(conn)\n\n*Example:* Delete a single document from the table `comments` and return its value.\n\n>>> r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete(return_vals=True).run(conn)\n\nThe result look like:\n\n>>> {\n... "deleted": 1,\n... "errors": 0,\n... "inserted": 0,\n... "new_val": None,\n... 
"old_val": {\n... "id": "7eab9e63-73f1-4f33-8ce4-95cbea626f59",\n... "author": "William",\n... "comment": "Great post",\n... "id_post": 3\n... },\n... "replaced": 0,\n... "skipped": 0,\n... "unchanged": 0\n... }\n\n*Example:* Delete all documents from the table `comments` without waiting for the\noperation to be flushed to disk.\n\n>>> r.table("comments").delete(durability="soft"}).run(conn)\n'
get_unbound_func(rethinkdb.ast.Table.insert).__doc__ = u'Insert documents into a table. Accepts a single document or an array of\ndocuments.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_vals`: if set to `True` and in case of a single insert/upsert, the inserted/updated\ndocument will be returned.\n- `upsert`: when set to `True`, performs a [replace](/api/python/replace/) if a\ndocument with the same primary key exists.\n\nInsert returns an object that contains the following attributes:\n\n- `inserted`: the number of documents that were succesfully inserted.\n- `replaced`: the number of documents that were updated when upsert is used.\n- `unchanged`: the number of documents that would have been modified, except that the\nnew value was the same as the old value when doing an upsert.\n- `errors`: the number of errors encountered while performing the insert.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `skipped`: 0 for an insert operation.\n- `generated_keys`: a list of generated primary keys in case the primary keys for some\ndocuments were missing (capped to 100000).\n- `warnings`: if the field `generated_keys` is truncated, you will get the warning _"Too\nmany generated keys (<X>), array truncated to 100000."_.\n- `old_val`: if `return_vals` is set to `True`, contains `None`.\n- `new_val`: if `return_vals` is set to `True`, contains the inserted/updated document.\n\n*Example:* Insert a document into the table `posts`.\n\n>>> r.table("posts").insert({\n... "id": 1,\n... "title": "Lorem ipsum",\n... "content": "Dolor sit amet"\n... }).run(conn)\n\nThe result will be:\n\n>>> {\n... "deleted": 0,\n... "errors": 0,\n... 
"inserted": 1,\n... "replaced": 0,\n... "skipped": 0,\n... "unchanged": 0\n... }\n\n*Example:* Insert a document without a defined primary key into the table `posts` where the\nprimary key is `id`.\n\n>>> r.table("posts").insert({\n... "title": "Lorem ipsum",\n... "content": "Dolor sit amet"\n... }).run(conn)\n\nRethinkDB will generate a primary key and return it in `generated_keys`.\n\n>>> {\n... "deleted": 0,\n... "errors": 0,\n... "generated_keys": [\n... "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n... ],\n... "inserted": 1,\n... "replaced": 0,\n... "skipped": 0,\n... "unchanged": 0\n... }\n\nRetrieve the document you just inserted with:\n\n>>> r.table("posts").get("dd782b64-70a7-43e4-b65e-dd14ae61d947").run(conn)\n\nAnd you will get back:\n\n>>> {\n... "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n... "title": "Lorem ipsum",\n... "content": "Dolor sit amet",\n... }\n\n*Example:* Insert multiple documents into the table `users`.\n\n>>> r.table("users").insert([\n... {"id": "william", "email": "william@rethinkdb.com"},\n... {"id": "lara", "email": "lara@rethinkdb.com"}\n... ]).run(conn)\n\n*Example:* Insert a document into the table `users`, replacing the document if the document\nalready exists. \n_Note_: If the document exists, the `insert` command will behave like [replace](/api/python/replace/), not like [update](/api/python/update/) \n\n>>> r.table("users").insert(\n... {"id": "william", "email": "william@rethinkdb.com"},\n... upsert=True\n... ).run(conn)\n\n*Example:* Copy the documents from `posts` to `posts_backup`.\n\n>>> r.table("posts_backup").insert( r.table("posts") ).run(conn)\n\n*Example:* Get back a copy of the inserted document (with its generated primary key).\n\n>>> r.table("posts").insert(\n... {"title": "Lorem ipsum", "content": "Dolor sit amet"},\n... return_vals=True\n... ).run(conn)\n\nThe result will be\n\n>>> {\n... "deleted": 0,\n... "errors": 0,\n... "generated_keys": [\n... "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n... ],\n... 
"inserted": 1,\n... "replaced": 0,\n... "skipped": 0,\n... "unchanged": 0,\n... "old_val": None,\n... "new_val": {\n... "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n... "title": "Lorem ipsum",\n... "content": "Dolor sit amet"\n... }\n... }\n'
get_unbound_func(rethinkdb.ast.Table.replace).__doc__ = u'Replace documents in a table. Accepts a JSON document or a ReQL expression, and replaces\nthe original document with the new one. The new document must have the same primary key\nas the original document.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_vals`: if set to `True` and in case of a single replace, the replaced document\nwill be returned.\n- `non_atomic`: set to `True` if you want to perform non-atomic replaces (replaces that\nrequire fetching data from another document).\n\nReplace returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were replaced\n- `unchanged`: the number of documents that would have been modified, except that the\nnew value was the same as the old value\n- `inserted`: the number of new documents added. You can have new documents inserted if\nyou do a point-replace on a key that isn\'t in the table or you do a replace on a\nselection and one of the documents you are replacing has been deleted\n- `deleted`: the number of deleted documents when doing a replace with `None`\n- `errors`: the number of errors encountered while performing the replace.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `skipped`: 0 for a replace operation\n- `old_val`: if `return_vals` is set to `True`, contains the old document.\n- `new_val`: if `return_vals` is set to `True`, contains the new document.\n\n*Example:* Replace the document with the primary key `1`.\n\n>>> r.table("posts").get(1).replace({\n... "id": 1,\n... "title": "Lorem ipsum",\n... "content": "Aleas jacta est",\n... "status": "draft"\n... 
}).run(conn)\n\n*Example:* Remove the field `status` from all posts.\n\n>>> r.table("posts").replace(lambda post:\n... post.without("status")\n... ).run(conn)\n\n*Example:* Remove all the fields that are not `id`, `title` or `content`.\n\n>>> r.table("posts").replace(lambda post:\n... post.pluck("id", "title", "content")\n... ).run(conn)\n\n*Example:* Replace the document with the primary key `1` using soft durability.\n\n>>> r.table("posts").get(1).replace({\n... "id": 1,\n... "title": "Lorem ipsum",\n... "content": "Aleas jacta est",\n... "status": "draft"\n... }, durability="soft").run(conn)\n\n*Example:* Replace the document with the primary key `1` and return the values of the document before\nand after the replace operation.\n\n>>> r.table("posts").get(1).replace({\n... "id": 1,\n... "title": "Lorem ipsum",\n... "content": "Aleas jacta est",\n... "status": "published"\n... }, return_vals=True).run(conn)\n\nThe result will have two fields `old_val` and `new_val`.\n\n>>> {\n... "deleted": 0,\n... "errors": 0,\n... "inserted": 0,\n... "new_val": {\n... "id":1,\n... "title": "Lorem ipsum"\n... "content": "Aleas jacta est",\n... "status": "published",\n... },\n... "old_val": {\n... "id":1,\n... "title": "Lorem ipsum"\n... "content": "TODO",\n... "status": "draft",\n... "author": "William",\n... },\n... "replaced": 1,\n... "skipped": 0,\n... "unchanged": 0\n... }\n'
get_unbound_func(rethinkdb.ast.Table.sync).__doc__ = u'`sync` ensures that writes on a given table are written to permanent storage. Queries\nthat specify soft durability (`durability=\'soft\'`) do not give such guarantees, so\n`sync` can be used to ensure the state of these queries. A call to `sync` does not return\nuntil all previous writes to the table are persisted.\n\nIf successful, the operation returns an object: `{"synced": 1}`.\n\n*Example:* After having updated multiple heroes with soft durability, we now want to wait\nuntil these changes are persisted.\n\n>>> r.table(\'marvel\').sync().run(conn)\n\n'
get_unbound_func(rethinkdb.ast.Table.update).__doc__ = u'Update JSON documents in a table. Accepts a JSON document, a ReQL expression, or a\ncombination of the two.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_vals`: if set to `True` and in case of a single update, the updated document\nwill be returned.\n- `non_atomic`: set to `True` if you want to perform non-atomic updates (updates that\nrequire fetching data from another document).\n\nUpdate returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were updated.\n- `unchanged`: the number of documents that would have been modified except the new\nvalue was the same as the old value.\n- `skipped`: the number of documents that were skipped because the document didn\'t exist.\n- `errors`: the number of errors encountered while performing the update.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `inserted`: 0 for an update operation.\n- `old_val`: if `return_vals` is set to `True`, contains the old document.\n- `new_val`: if `return_vals` is set to `True`, contains the new document.\n\n*Example:* Update the status of the post with `id` of `1` to `published`.\n\n>>> r.table("posts").get(1).update({"status": "published"}).run(conn)\n\n*Example:* Update the status of all posts to `published`.\n\n>>> r.table("posts").update({"status": "published"}).run(conn)\n\n*Example:* Update the status of all the post written by William.\n\n>>> r.table("posts").filter({"author": "William"}).update({"status": "published"}).run(conn)\n\n*Example:* Increment the field `view` with `id` of `1`.\nThis query will throw an error if the field 
`views` doesn\'t exist.\n\n>>> r.table("posts").get(1).update({\n... "views": r.row["views"]+1\n... }).run(conn)\n\n*Example:* Increment the field `view` of the post with `id` of `1`.\nIf the field `views` does not exist, it will be set to `0`.\n\n>>> r.table("posts").update({\n... "views": (r.row["views"]+1).default(0)\n... }).run(conn)\n\n*Example:* Perform a conditional update. \nIf the post has more than 100 views, set the `type` of a post to `hot`, else set it to `normal`.\n\n>>> r.table("posts").get(1).update(lambda post:\n... r.branch(\n... post["views"] > 100,\n... {"type": "hot"},\n... {"type": "normal"}\n... )\n... ).run(conn)\n\n*Example:* Update the field `num_comments` with the result of a sub-query. Because\nthis update is not atomic, you must pass the `non_atomic` flag.\n\n>>> r.table("posts").get(1).update({\n... "num_comments": r.table("comments").filter({"id_post": 1}).count()\n... }, non_atomic=True ).run(conn)\n\nIf you forget to specify the `non_atomic` flag, you will get a `RqlRuntimeError`.\n\nRqlRuntimeError: Could not prove function deterministic. Maybe you want to use the non_atomic flag? \n\n*Example:* Update the field `num_comments` with a random value between 0 and 100. \nThis update cannot be proven deterministic because of `r.js` (and in fact is not), so you\nmust pass the `non_atomic` flag.\n\n>>> r.table("posts").get(1).update({\n... "num_comments": r.js("Math.floor(Math.random()*100)")\n... }, non_atomic=True ).run(conn)\n\n*Example:* Update the status of the post with `id` of `1` using soft durability.\n\n>>> r.table("posts").get(1).update({status: "published"}, durability="soft").run(conn)\n\n*Example:* Increment the field `views` and return the values of the document before\nand after the update operation.\n\n>>> r.table("posts").get(1).update({\n... "views": r.row["views"]+1\n... }, return_vals=True).run(conn)\n\nThe result will have two fields `old_val` and `new_val`.\n\n>>> {\n... "deleted": 1,\n... "errors": 0,\n... 
"inserted": 0,\n... "new_val": {\n... "id": 1,\n... "author": "Julius_Caesar",\n... "title": "Commentarii de Bello Gallico",\n... "content": "Aleas jacta est",\n... "views": 207\n... },\n... "old_val": {\n... "id": 1,\n... "author": "Julius_Caesar",\n... "title": "Commentarii de Bello Gallico",\n... "content": "Aleas jacta est",\n... "views": 206\n... },\n... "replaced": 0,\n... "skipped": 0,\n... "unchanged": 0\n... }\n\n' | /rethinkdb-py3-0.2.tar.gz/rethinkdb-py3-0.2/rethinkdb/docs.py | 0.806014 | 0.239816 | docs.py | pypi |
import rethinkdb
docsSource = [
(rethinkdb.net.Connection.close, b'conn.close(noreply_wait=True)\n\nClose an open connection.\n\nClosing a connection normally waits until all outstanding requests have finished and then frees any open resources associated with the connection. By passing `False` to the `noreply_wait` optional argument, the connection will be closed immediately, possibly aborting any outstanding noreply writes.\n\nA noreply query is executed by passing the `noreply` option to the [run](http://rethinkdb.com/api/python/run/) command, indicating that `run()` should not wait for the query to complete before returning. You may also explicitly wait for a noreply query to complete by using the [noreply_wait](http://rethinkdb.com/api/python/noreply_wait) command.\n\n*Example* Close an open connection, waiting for noreply writes to finish.\n\n conn.close()\n\n*Example* Close an open connection immediately.\n\n conn.close(noreply_wait=False)\n'),
(rethinkdb.connect, b'r.connect(host="localhost", port=28015, db="test", auth_key="", timeout=20) -> connection\nr.connect(host) -> connection\n\nCreate a new connection to the database server. The keyword arguments are:\n\n- `host`: host of the RethinkDB instance. The default value is `localhost`.\n- `port`: the driver port, by default `28015`.\n- `db`: the database used if not explicitly specified in a query, by default `test`.\n- `auth_key`: the authentication key, by default the empty string.\n- `timeout`: timeout period in seconds for the connection to be opened (default `20`).\n- `ssl`: a hash of options to support SSL connections (default `None`). Currently, there is only one option available, and if the `ssl` option is specified, this key is required:\n - `ca_certs`: a path to the SSL CA certificate.\n\nIf the connection cannot be established, a `ReqlDriverError` exception will be thrown.\n\nThe authentication key can be set from the RethinkDB command line tool. Once set, client connections must provide the key as an option to `run` in order to make the connection. For more information, read "Using the RethinkDB authentication system" in the documentation on [securing your cluster](http://rethinkdb.com/docs/security/).\n\nThe RethinkDB Python driver includes support for asynchronous connections using Tornado and Twisted. Read the asynchronous connections documentation for more information.\n\n*Example* Open a connection using the default host and port, specifying the default database.\n\n conn = r.connect(db=\'marvel\')\n\n*Example* Open a new connection to the database.\n\n conn = r.connect(host=\'localhost\',\n port=28015,\n db=\'heroes\',\n auth_key=\'hunter2\')\n\n*Example* Open a new connection to the database using an SSL proxy.\n\n conn = r.connect(host=\'localhost\',\n port=28015,\n auth_key=\'hunter2\',\n ssl={\'ca_certs\': \'/path/to/ca.crt\'})\n\n*Example* Use a `with` statement to open a connection and pass it to a block. 
Using this style, the connection will be automatically closed when execution reaches the end of the block.\n\n with r.connect(db=\'marvel\') as conn:\n r.table(\'superheroes\').run(conn)\n'),
(rethinkdb.net.Connection.noreply_wait, b'conn.noreply_wait()\n\n`noreply_wait` ensures that previous queries with the `noreply` flag have been processed\nby the server. Note that this guarantee only applies to queries run on the given connection.\n\n*Example* We have previously run queries with the `noreply` argument set to `True`. Now\nwait until the server has processed them.\n\n conn.noreply_wait()\n\n'),
(rethinkdb, b'r -> r\n\nThe top-level ReQL namespace.\n\n*Example* Setup your top-level namespace.\n\n import rethinkdb as r\n\n'),
(rethinkdb.net.Connection.reconnect, b'conn.reconnect(noreply_wait=True)\n\nClose and reopen a connection.\n\nClosing a connection normally waits until all outstanding requests have finished and then frees any open resources associated with the connection. By passing `False` to the `noreply_wait` optional argument, the connection will be closed immediately, possibly aborting any outstanding noreply writes.\n\nA noreply query is executed by passing the `noreply` option to the [run](http://rethinkdb.com/api/python/run/) command, indicating that `run()` should not wait for the query to complete before returning. You may also explicitly wait for a noreply query to complete by using the [noreply_wait](http://rethinkdb.com/api/python/noreply_wait) command.\n\n*Example* Cancel outstanding requests/queries that are no longer needed.\n\n conn.reconnect(noreply_wait=False)\n'),
(rethinkdb.net.Connection.repl, b"conn.repl()\n\nSet the default connection to make REPL use easier. Allows calling\n`.run()` on queries without specifying a connection.\n\n__Note:__ Avoid using `repl` in application code. RethinkDB connection objects are not thread-safe, and calls to `connect` from multiple threads may change the global connection object used by `repl`. Applications should specify connections explicitly.\n\n*Example* Set the default connection for the REPL, then call\n`run()` without specifying the connection.\n\n r.connect(db='marvel').repl()\n r.table('heroes').run()\n"),
(rethinkdb.ast.RqlQuery.run, b'query.run(conn[, options]) -> cursor\nquery.run(conn[, options]) -> object\n\nRun a query on a connection, returning either a single JSON result or\na cursor, depending on the query.\n\nThe optional arguments are:\n\n- `read_mode`: One of three possible values affecting the consistency guarantee for the query (default: `\'single\'`).\n - `\'single\'` (the default) returns values that are in memory (but not necessarily written to disk) on the primary replica.\n - `\'majority\'` will only return values that are safely committed on disk on a majority of replicas. This requires sending a message to every replica on each read, so it is the slowest but most consistent.\n - `\'outdated\'` will return values that are in memory on an arbitrarily-selected replica. This is the fastest but least consistent.\n- `time_format`: what format to return times in (default: `\'native\'`).\n Set this to `\'raw\'` if you want times returned as JSON objects for exporting.\n- `profile`: whether or not to return a profile of the query\'s\n execution (default: `False`).\n- `durability`: possible values are `\'hard\'` and `\'soft\'`. In soft durability mode RethinkDB\nwill acknowledge the write immediately after receiving it, but before the write has\nbeen committed to disk.\n- `group_format`: what format to return `grouped_data` and `grouped_streams` in (default: `\'native\'`).\n Set this to `\'raw\'` if you want the raw pseudotype.\n- `noreply`: set to `True` to not receive the result object or cursor and return immediately.\n- `db`: the database to run this query against as a string. The default is the database specified in the `db` parameter to [connect](http://rethinkdb.com/api/python/connect/) (which defaults to `test`). The database may also be specified with the [db](http://rethinkdb.com/api/python/db/) command.\n- `array_limit`: the maximum numbers of array elements that can be returned by a query (default: 100,000). 
This affects all ReQL commands that return arrays. Note that it has no effect on the size of arrays being _written_ to the database; those always have an upper limit of 100,000 elements.\n- `binary_format`: what format to return binary data in (default: `\'native\'`). Set this to `\'raw\'` if you want the raw pseudotype.\n- `min_batch_rows`: minimum number of rows to wait for before batching a result set (default: 8). This is an integer.\n- `max_batch_rows`: maximum number of rows to wait for before batching a result set (default: unlimited). This is an integer.\n- `max_batch_bytes`: maximum number of bytes to wait for before batching a result set (default: 1MB). This is an integer.\n- `max_batch_seconds`: maximum number of seconds to wait before batching a result set (default: 0.5). This is a float (not an integer) and may be specified to the microsecond.\n- `first_batch_scaledown_factor`: factor to scale the other parameters down by on the first batch (default: 4). For example, with this set to 8 and `max_batch_rows` set to 80, on the first batch `max_batch_rows` will be adjusted to 10 (80 / 8). This allows the first batch to return faster.\n\n*Example* Run a query on the connection `conn` and print out every\nrow in the result.\n\n for doc in r.table(\'marvel\').run(conn):\n print doc\n\n*Example* If you are OK with potentially out of date data from all\nthe tables involved in this query and want potentially faster reads,\npass a flag allowing out of date data in an options object. Settings\nfor individual tables will supercede this global setting for all\ntables in the query.\n\n r.table(\'marvel\').run(conn, read_mode=\'outdated\')\n\n*Example* If you just want to send a write and forget about it, you\ncan set `noreply` to true in the options. 
In this case `run` will\nreturn immediately.\n\n r.table(\'marvel\').run(conn, noreply=True)\n\n*Example* If you want to specify whether to wait for a write to be\nwritten to disk (overriding the table\'s default settings), you can set\n`durability` to `\'hard\'` or `\'soft\'` in the options.\n\n r.table(\'marvel\')\n .insert({ \'superhero\': \'Iron Man\', \'superpower\': \'Arc Reactor\' })\n .run(conn, noreply=True, durability=\'soft\')\n\n*Example* If you do not want a time object to be converted to a\nnative date object, you can pass a `time_format` flag to prevent it\n(valid flags are "raw" and "native"). This query returns an object\nwith two fields (`epoch_time` and `$reql_type$`) instead of a native date\nobject.\n\n r.now().run(conn, time_format="raw")\n\n*Example* Specify the database to use for the query.\n\n for doc in r.table(\'marvel\').run(conn, db=\'heroes\'):\n print doc\n\nThis is equivalent to using the `db` command to specify the database:\n\n r.db(\'heroes\').table(\'marvel\').run(conn) ...\n\n*Example* Change the batching parameters for this query.\n\n r.table(\'marvel\').run(conn, max_batch_rows=16, max_batch_bytes=2048)\n'),
(rethinkdb.net.Connection.server, b'conn.server()\n\nReturn the server name and server UUID being used by a connection.\n\n*Example* Return the server name and UUID.\n\n > conn.server()\n \n { "id": "404bef53-4b2c-433f-9184-bc3f7bda4a15", "name": "amadeus" }\n'),
(rethinkdb.set_loop_type, b'r.set_loop_type(string)\n\nSet an asynchronous event loop model. There are two supported models:\n\n* `"tornado"`: use the Tornado web framework. Under this model, the connect and run commands will return Tornado `Future` objects.\n* `"twisted"`: use the Twisted networking engine. Under this model, the connect and run commands will return Twisted `Deferred` objects.\n\n*Example* Read a table\'s data using Tornado.\n\n r.set_loop_type("tornado")\n conn = r.connect(host=\'localhost\', port=28015)\n \n @gen.coroutine\n def use_cursor(conn):\n # Print every row in the table.\n cursor = yield r.table(\'test\').order_by(index="id").run(yield conn)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n print(item)\n\nFor a longer discussion with both Tornado and Twisted examples, see the documentation article on Asynchronous connections.\n\n'),
(rethinkdb.net.Connection.use, b"conn.use(db_name)\n\nChange the default database on this connection.\n\n*Example* Change the default database so that we don't need to\nspecify the database when referencing a table.\n\n conn.use('marvel')\n r.table('heroes').run(conn) # refers to r.db('marvel').table('heroes')\n"),
(rethinkdb.ast.Table.config, b'table.config() -> selection<object>\ndatabase.config() -> selection<object>\n\nQuery (read and/or update) the configurations for individual tables or databases.\n\nThe `config` command is a shorthand way to access the `table_config` or `db_config` [System tables](http://rethinkdb.com/docs/system-tables/#configuration-tables). It will return the single row from the system that corresponds to the database or table configuration, as if [get](http://rethinkdb.com/api/python/get) had been called on the system table with the UUID of the database or table in question.\n\n*Example* Get the configuration for the `users` table.\n\n r.table(\'users\').config().run(conn)\n \n {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "users",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "a",\n "replicas": ["a", "b"],\n "nonvoting_replicas": []\n },\n {\n "primary_replica": "d",\n "replicas": ["c", "d"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n }\n\n*Example* Change the write acknowledgement requirement of the `users` table.\n\n r.table(\'users\').config().update({\'write_acks\': \'single\'}).run(conn)\n'),
(rethinkdb.ast.Table.rebalance, b'table.rebalance() -> object\ndatabase.rebalance() -> object\n\nRebalances the shards of a table. When called on a database, all the tables in that database will be rebalanced.\n\nThe `rebalance` command operates by measuring the distribution of primary keys within a table and picking split points that will give each shard approximately the same number of documents. It won\'t change the number of shards within a table, or change any other configuration aspect for the table or the database.\n\nA table will lose availability temporarily after `rebalance` is called; use the [wait](http://rethinkdb.com/api/python/wait) command to wait for the table to become available again, or [status](http://rethinkdb.com/api/python/status) to check if the table is available for writing.\n\nRethinkDB automatically rebalances tables when the number of shards are increased, and as long as your documents have evenly distributed primary keys—such as the default UUIDs—it is rarely necessary to call `rebalance` manually. Cases where `rebalance` may need to be called include:\n\n* Tables with unevenly distributed primary keys, such as incrementing integers\n* Changing a table\'s primary key type\n* Increasing the number of shards on an empty table, then using non-UUID primary keys in that table\n\nThe [web UI](http://rethinkdb.com/docs/administration-tools/) (and the [info](http://rethinkdb.com/api/python/info) command) can be used to tell you when a table\'s shards need to be rebalanced.\n\nThe return value of `rebalance` is an object with two fields:\n\n* `rebalanced`: the number of tables rebalanced.\n* `status_changes`: a list of new and old table status values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `rebalance` was executed. \n * `new_val`: The table\'s `status` value after `rebalance` was executed. 
(This value will almost always indicate the table is unavailable.)\n\nSee the [status](http://rethinkdb.com/api/python/status) command for an explanation of the objects returned in the `old_val` and `new_val` fields.\n\n*Example* Rebalance a table.\n\n r.table(\'superheroes\').rebalance().run(conn)\n \n {\n "rebalanced": 1,\n "status_changes": [\n {\n "old_val": {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": True,\n "ready_for_outdated_reads": True,\n "ready_for_reads": True,\n "ready_for_writes": True\n }\n },\n "new_val": {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "transitioning"\n }\n ]\n },\n {\n "primary_replica": "jeeves",\n "replicas": [\n {\n "server": "jeeves",\n "state": "transitioning"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": False,\n "ready_for_outdated_reads": False,\n "ready_for_reads": False,\n "ready_for_writes": False\n }\n }\n \n }\n ]\n }\n'),
(rethinkdb.ast.Table.reconfigure, b'table.reconfigure(shards=<s>, replicas=<r>[, primary_replica_tag=<t>, dry_run=False, nonvoting_replica_tags=None]) -> object\ndatabase.reconfigure(shards=<s>, replicas=<r>[, primary_replica_tag=<t>, dry_run=False, nonvoting_replica_tags=None]) -> object\ntable.reconfigure(emergency_repair=<option>, dry_run=False) -> object\n\nReconfigure a table\'s sharding and replication.\n\n* `shards`: the number of shards, an integer from 1-32. Required.\n* `replicas`: either an integer or a mapping object. Required.\n * If `replicas` is an integer, it specifies the number of replicas per shard. Specifying more replicas than there are servers will return an error.\n * If `replicas` is an object, it specifies key-value pairs of server tags and the number of replicas to assign to those servers: `{"tag1": 2, "tag2": 4, "tag3": 2, ...}`. For more information about server tags, read [Administration tools](http://rethinkdb.com/docs/administration-tools/).\n* `primary_replica_tag`: the primary server specified by its server tag. Required if `replicas` is an object; the tag must be in the object. This must *not* be specified if `replicas` is an integer.\n* `dry_run`: if `True` the generated configuration will not be applied to the table, only returned.\n* `nonvoting_replica_tags`: replicas with these server tags will be added to the `nonvoting_replicas` list of the resulting configuration. (See [failover](http://rethinkdb.com/docs/failover) for details about non-voting replicas.)\n* `emergency_repair`: Used for the Emergency Repair mode. See the separate section below.\n\nThe return value of `reconfigure` is an object with three fields:\n\n* `reconfigured`: the number of tables reconfigured. This will be `0` if `dry_run` is `True`.\n* `config_changes`: a list of new and old table configuration values. 
Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [config](http://rethinkdb.com/api/python/config) value before `reconfigure` was executed. \n * `new_val`: The table\'s `config` value after `reconfigure` was executed.\n* `status_changes`: a list of new and old table status values. Each element of the list will be an object with two fields:\n * `old_val`: The table\'s [status](http://rethinkdb.com/api/python/status) value before `reconfigure` was executed. \n * `new_val`: The table\'s `status` value after `reconfigure` was executed.\n\nFor `config_changes` and `status_changes`, see the [config](http://rethinkdb.com/api/python/config) and [status](http://rethinkdb.com/api/python/status) commands for an explanation of the objects returned in the `old_val` and `new_val` fields.\n\nA table will lose availability temporarily after `reconfigure` is called; use the [table_status](http://rethinkdb.com/api/python/table_status) command to determine when the table is available again.\n\n**Note:** Whenever you call `reconfigure`, the write durability will be set to `hard` and the write acknowledgments will be set to `majority`; these can be changed by using the `config` command on the table.\n\nIf `reconfigure` is called on a database, all the tables in the database will have their configurations affected. 
The return value will be an array of the objects described above, one per table.\n\nRead [Sharding and replication](http://rethinkdb.com/docs/sharding-and-replication/) for a complete discussion of the subject, including advanced topics.\n\n*Example* Reconfigure a table.\n\n r.table(\'superheroes\').reconfigure(shards=2, replicas=1).run(conn)\n \n {\n "reconfigured": 1,\n "config_changes": [\n {\n "new_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n },\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n },\n "old_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n }\n }\n ],\n "status_changes": [\n {\n "new_val": (status object),\n "old_val": (status object)\n }\n ]\n }\n\n*Example* Reconfigure a table, specifying replicas by server tags.\n\n r.table(\'superheroes\').reconfigure(shards=2, replicas={\'wooster\': 1, \'wayne\': 1}, primary_replica_tag=\'wooster\').run(conn)\n \n {\n "reconfigured": 1,\n "config_changes": [\n {\n "new_val": {\n "id": "31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "jeeves",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n },\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n },\n "old_val": {\n "id": 
"31c92680-f70c-4a4b-a49e-b238eb12c023",\n "name": "superheroes",\n "db": "superstuff",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "alfred",\n "replicas": ["jeeves", "alfred"],\n "nonvoting_replicas": []\n }\n ],\n "indexes": [],\n "write_acks": "majority",\n "durability": "hard"\n }\n }\n ],\n "status_changes": [\n {\n "new_val": (status object),\n "old_val": (status object)\n }\n ]\n }\n\nRethinkDB supports automatic failover when more than half of the voting replicas for each shard of a table are still available (see the Failover documentation for more details). However, if half or more of the voting replicas for a shard are lost, failover will not happen automatically, leaving two options:\n\n* Bring enough of the missing servers back online to allow automatic failover\n* Use emergency repair mode to reconfigure the table\n\nThe `emergency_repair` argument is effectively a different command; when it is specified, no other arguments to `reconfigure` are allowed except for `dry_run`. When it\'s executed, each shard of the table is examined and classified into one of three categories:\n\n* **Healthy:** more than half of the shard\'s voting replicas are still available.\n* **Repairable:** the shard is not healthy, but has at least one replica, whether voting or non-voting, available.\n* **Beyond repair:** the shard has no replicas available.\n\nFor each repairable shard, `emergency_repair` will convert all unavailable voting replicas into non-voting replicas. If all the voting replicas were removed, an arbitrarily-chosen available non-voting replica will be converted into a voting replica. 
After this operation, all of the shard\'s available replicas will be voting replicas.\n\nSpecify `emergency_repair` with one of two string options:\n\n* `unsafe_rollback`: shards that are beyond repair will be left alone.\n* `unsafe_rollback_or_erase`: a shard that is beyond repair will be destroyed and recreated on an available server that holds another shard for that table.\n\nThe return value of `reconfigure` in emergency repair mode is the same as before. Examine the `config_changes` field to see the old and new configuration settings for the table. As in the normal mode, if you specify `emergency_repair` with `dry_run: True`, the table will not actually be reconfigured.\n\n__Note:__ `emergency_repair` may only be used on individual tables, not on databases. It cannot be used after the `db` command.\n\n*Example* Perform an emergency repair on a table.\n\n r.table(\'superheroes\').reconfigure(emergency_repair=\'unsafe_rollback\').run(conn)\n'),
(rethinkdb.ast.Table.status, b'table.status() -> selection<object>\n\nReturn the status of a table.\n\nThe return value is an object providing information about the table\'s shards, replicas and replica readiness states. For a more complete discussion of the object fields, read about the `table_status` table in [System tables](http://rethinkdb.com/docs/system-tables/#status-tables).\n\n* `id`: the UUID of the table.\n* `name`: the table\'s name.\n* `db`: the database the table is in.\n* `status`: the subfields in this field indicate whether all shards of the table are ready to accept the given type of query: `outdated_reads`, `reads` and `writes`. The `all_replicas_ready` field indicates whether all backfills have finished.\n* `shards`: one entry for each shard in `table_config`. Each shard\'s object has the following fields:\n\t* `primary_replicas`: a list of zero or more servers acting as primary replicas for the table.\n\t* `replicas`: a list of all servers acting as a replica for that shard. The `state` field may be one of the following: `ready`, `transitioning`, `backfilling`, `disconnected`, `waiting_for_primary`, or `waiting_for_quorum`.\n\n*Example* Get a table\'s status.\n\n r.table(\'superheroes\').status().run(conn)\n \n {\n "db": "database",\n "id": "5cb35225-81b2-4cec-9eef-bfad15481265",\n "name": "superheroes",\n "shards": [\n {\n "primary_replicas": ["jeeves"],\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n },\n {\n "primary_replicas": ["jeeves"],\n "replicas": [\n {\n "server": "jeeves",\n "state": "ready"\n }\n ]\n }\n ],\n "status": {\n "all_replicas_ready": True,\n "ready_for_outdated_reads": True,\n "ready_for_reads": True,\n "ready_for_writes": True\n }\n }\n'),
(rethinkdb.ast.Table.wait, b'table.wait([wait_for=\'ready_for_writes\', timeout=<sec>]) -> object\ndatabase.wait([wait_for=\'ready_for_writes\', timeout=<sec>]) -> object\nr.wait([wait_for=\'ready_for_writes\', timeout=<sec>]) -> object\n\nWait for a table or all the tables in a database to be ready. A table may be temporarily unavailable after creation, rebalancing or reconfiguring. The `wait` command blocks until the given table (or database) is fully up to date.\n\nThe `wait` command takes two optional arguments:\n\n* `wait_for`: a string indicating a table [status](http://rethinkdb.com/api/python/status) to wait on before returning, one of `ready_for_outdated_reads`, `ready_for_reads`, `ready_for_writes`, or `all_replicas_ready`. The default is `ready_for_writes`. \n* `timeout`: a number indicating maximum time, in seconds, to wait for the table to be ready. If this value is exceeded, a `ReqlRuntimeError` will be thrown. A value of`0` means no timeout. The default is `0` (no timeout).\n\nThe return value is an object consisting of a single field, `ready`. The value is an integer indicating the number of tables waited for. It will always be `1` when `wait` is called on a table, and the total number of tables when called on a database.\n\nIf `wait` is called with no table or database specified (the `r.wait()` form), it will wait on all the tables in the default database (set with the [connect](http://rethinkdb.com/api/python/connect/) command\'s `db` parameter, which defaults to `test`).\n\n*Example* Wait on a table to be ready.\n\n r.table(\'superheroes\').wait().run(conn)\n \n {"ready": 1}\n'),
(rethinkdb.ast.RqlQuery.avg, b"sequence.avg([field | function]) -> number\n\nAverages all the elements of a sequence. If called with a field name,\naverages all the values of that field in the sequence, skipping\nelements of the sequence that lack that field. If called with a\nfunction, calls that function on every element of the sequence and\naverages the results, skipping elements of the sequence where that\nfunction returns `None` or a non-existence error.\n\nProduces a non-existence error when called on an empty sequence. You\ncan handle this case with `default`.\n\n*Example* What's the average of 3, 5, and 7?\n\n r.expr([3, 5, 7]).avg().run(conn)\n\n*Example* What's the average number of points scored in a game?\n\n r.table('games').avg('points').run(conn)\n\n*Example* What's the average number of points scored in a game,\ncounting bonus points?\n\n r.table('games').avg(lambda game:\n game['points'] + game['bonus_points']\n ).run(conn)\n\n*Example* What's the average number of points scored in a game?\n(But return `None` instead of raising an error if there are no games where\npoints have been scored.)\n\n r.table('games').avg('points').default(None).run(conn)\n"),
(rethinkdb.ast.RqlQuery.contains, b"sequence.contains([value | predicate_function, ...]) -> bool\n\nWhen called with values, returns `True` if a sequence contains all the\nspecified values. When called with predicate functions, returns `True`\nif for each predicate there exists at least one element of the stream\nwhere that predicate returns `True`.\n\nValues and predicates may be mixed freely in the argument list.\n\n*Example* Has Iron Man ever fought Superman?\n\n r.table('marvel').get('ironman')['opponents'].contains('superman').run(conn)\n\n*Example* Has Iron Man ever defeated Superman in battle?\n\n r.table('marvel').get('ironman')['battles'].contains(lambda battle:\n (battle['winner'] == 'ironman') & (battle['loser'] == 'superman')\n ).run(conn)\n\n*Example* Use `contains` with a predicate function to simulate an `or`. Return the Marvel superheroes who live in Detroit, Chicago or Hoboken.\n\n r.table('marvel').filter(\n lambda hero: r.expr(['Detroit', 'Chicago', 'Hoboken']).contains(hero['city'])\n ).run(conn)\n"),
(rethinkdb.ast.RqlQuery.count, b"sequence.count([value | predicate_function]) -> number\nbinary.count() -> number\n\nCounts the number of elements in a sequence. If called with a value,\ncounts the number of times that value occurs in the sequence. If\ncalled with a predicate function, counts the number of elements in the\nsequence where that function returns `True`.\n\nIf `count` is called on a [binary](http://rethinkdb.com/api/python/binary) object, it will return the size of the object in bytes.\n\n*Example* Count the number of users.\n\n r.table('users').count().run(conn)\n\n*Example* Count the number of 18 year old users.\n\n r.table('users')['age'].count(18).run(conn)\n\n*Example* Count the number of users over 18.\n\n r.table('users')['age'].count(lambda age: age > 18).run(conn)\n\n r.table('users').count(lambda user: user['age'] > 18).run(conn)\n"),
(rethinkdb.ast.RqlQuery.distinct, b"sequence.distinct() -> array\ntable.distinct([index=<indexname>]) -> stream\n\nRemoves duplicate elements from a sequence.\n\nThe `distinct` command can be called on any sequence or table with an index.\n\n*Example* Which unique villains have been vanquished by Marvel heroes?\n\n r.table('marvel').concat_map(\n lambda hero: hero['villain_list']).distinct().run(conn)\n\n*Example* Topics in a table of messages have a secondary index on them, and more than one message can have the same topic. What are the unique topics in the table?\n\n r.table('messages').distinct(index='topics').run(conn)\n\nThe above structure is functionally identical to:\n\n r.table('messages')['topics'].distinct().run(conn)\n\nHowever, the first form (passing the index as an argument to `distinct`) is faster, and won't run into array limit issues since it's returning a stream.\n"),
(rethinkdb.ast.RqlQuery.group, b'sequence.group(field | function..., [index=<indexname>, multi=False]) -> grouped_stream\n\nTakes a stream and partitions it into multiple groups based on the\nfields or functions provided.\n\nWith the `multi` flag single documents can be assigned to multiple groups, similar to the behavior of [multi-indexes](http://rethinkdb.com/docs/secondary-indexes/python). When `multi` is `True` and the grouping value is an array, documents will be placed in each group that corresponds to the elements of the array. If the array is empty the row will be ignored.\n\n*Example* Grouping games by player.\n\nSuppose that the table `games` has the following data:\n\n [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ]\n\nGrouping games by player can be done with:\n\n > r.table(\'games\').group(\'player\').run(conn)\n \n {\n "Alice": [\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ],\n "Bob": [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n\nCommands chained after `group` will be called on each of these grouped\nsub-streams, producing grouped data.\n\n*Example* What is each player\'s best game?\n\n > r.table(\'games\').group(\'player\').max(\'points\').run(conn)\n \n {\n "Alice": {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n "Bob": {"id": 2, "player": "Bob", "points": 15, "type": "ranked"}\n }\n\nCommands chained onto grouped data will operate on each grouped datum,\nproducing more grouped data.\n\n*Example* What is the maximum number of points scored by each player?\n\n > r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].run(conn)\n \n {\n "Alice": 7,\n "Bob": 15\n 
}\n\nYou can also group by more than one field.\n\n*Example* What is the maximum number of points scored by each\nplayer for each game type?\n\n > r.table(\'games\').group(\'player\', \'type\').max(\'points\')[\'points\'].run(conn)\n \n {\n ("Alice", "free"): 7,\n ("Bob", "free"): 10,\n ("Bob", "ranked"): 15\n }\n\nYou can also group by a function.\n\n*Example* What is the maximum number of points scored by each\nplayer for each game type?\n\n > r.table(\'games\')\n .group(lambda game:\n game.pluck(\'player\', \'type\')\n ).max(\'points\')[\'points\'].run(conn)\n \n {\n frozenset([(\'player\', \'Alice\'), (\'type\', \'free\')]): 7,\n frozenset([(\'player\', \'Bob\'), (\'type\', \'free\')]): 10,\n frozenset([(\'player\', \'Bob\'), (\'type\', \'ranked\')]): 15,\n }\n\nUsing a function, you can also group by date on a ReQL [date field](http://rethinkdb.com/docs/dates-and-times/javascript/).\n\n*Example* How many matches have been played this year by month?\n\n > r.table(\'matches\').group(\n lambda match: [match[\'date\'].year(), match[\'date\'].month()]\n ).count().run(conn)\n \n {\n (2014, 2): 2,\n (2014, 3): 2,\n (2014, 4): 1,\n (2014, 5): 3\n }\n\nYou can also group on an index (primary key or secondary).\n\n*Example* What is the maximum number of points scored by game type?\n\n > r.table(\'games\').group(index=\'type\').max(\'points\')[\'points\'].run(conn)\n \n {\n "free": 10,\n "ranked": 15\n }\n\nSuppose that the table `games2` has the following data:\n\n [\n { \'id\': 1, \'matches\': {\'a\': [1, 2, 3], \'b\': [4, 5, 6]} },\n { \'id\': 2, \'matches\': {\'b\': [100], \'c\': [7, 8, 9]} },\n { \'id\': 3, \'matches\': {\'a\': [10, 20], \'c\': [70, 80]} }\n ]\n\nUsing the `multi` option we can group data by match A, B or C.\n\n > r.table(\'games2\').group(r.row[\'matches\'].keys(), multi=True).run(conn)\n \n [\n {\n \'group\': \'a\',\n \'reduction\': [ <id 1>, <id 3> ]\n },\n {\n \'group\': \'b\',\n \'reduction\': [ <id 1>, <id 2> ]\n },\n {\n \'group\': \'c\',\n 
\'reduction\': [ <id 2>, <id 3> ]\n }\n ]\n\n(The full result set is abbreviated in the figure; `<id 1>, <id 2>` and `<id 3>` would be the entire documents matching those keys.)\n\n*Example* Use [map](http://rethinkdb.com/api/python/map) and [sum](http://rethinkdb.com/api/python/sum) to get the total points scored for each match.\n\n r.table(\'games2\').group(r.row[\'matches\'].keys(), multi=True).ungroup().map(\n lambda doc: { \'match\': doc[\'group\'], \'total\': doc[\'reduction\'].sum(\n lambda set: set[\'matches\'][doc[\'group\']].sum()\n )}).run(conn)\n \n [\n { \'match\': \'a\', \'total\': 36 },\n { \'match\': \'b\', \'total\': 115 },\n { \'match\': \'c\', \'total\': 174 }\n ]\n\nThe inner `sum` adds the scores by match within each document; the outer `sum` adds those results together for a total across all the documents.\n\nIf you want to operate on all the groups rather than operating on each\ngroup (e.g. if you want to order the groups by their reduction), you\ncan use [ungroup](http://rethinkdb.com/api/python/ungroup/) to turn a grouped stream or\ngrouped data into an array of objects representing the groups.\n\n*Example* Ungrouping grouped data.\n\n > r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].ungroup().run(conn)\n \n [\n {\n "group": "Alice",\n "reduction": 7\n },\n {\n "group": "Bob",\n "reduction": 15\n }\n ]\n\nUngrouping is useful e.g. for ordering grouped data, or for inserting\ngrouped data into a table.\n\n*Example* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\n > r.table(\'games\').group(\'player\').max(\'points\')[\'points\'].ungroup().order_by(\n r.desc(\'reduction\')).run(conn)\n \n [\n {\n "group": "Bob",\n "reduction": 15\n },\n {\n "group": "Alice",\n "reduction": 7\n }\n ]\n\nWhen grouped data are returned to the client, they are transformed\ninto a client-specific native type. (Something similar is done with\n[times](http://rethinkdb.com/docs/dates-and-times/).) 
In Python, grouped data are\ntransformed into a `dictionary`. If the group value is an `array`, the\nkey is converted to a `tuple`. If the group value is a `dictionary`,\nit will be converted to a `frozenset`.\n\nIf you instead want to receive the raw\npseudotype from the server (e.g. if you\'re planning to serialize the\nresult as JSON), you can specify `group_format: \'raw\'` as an optional\nargument to `run`:\n\n*Example* Get back the raw `GROUPED_DATA` pseudotype.\n\n > r.table(\'games\').group(\'player\').avg(\'points\').run(conn, group_format=\'raw\')\n \n {\n "$reql_type$": "GROUPED_DATA",\n "data": [\n ["Alice", 4.5],\n ["Bob", 12.5]\n ]\n }\n\nNot passing the `group_format` flag would return:\n\n {\n "Alice": 4.5,\n "Bob": 12.5\n }\n\nYou might also want to use the [ungroup](http://rethinkdb.com/api/python/ungroup/)\ncommand (see above), which will turn the grouped data into an array of\nobjects on the server.\n\nIf you run a query that returns a grouped stream, it will be\nautomatically converted to grouped data before being sent back to you\n(there is currently no efficient way to stream groups from RethinkDB).\nThis grouped data is subject to the array size limit (see [run](http://rethinkdb.com/api/python/run)).\n\nIn general, operations on grouped streams will be efficiently\ndistributed, and operations on grouped data won\'t be. You can figure\nout what you\'re working with by putting `type_of` on the end of your\nquery. Below are efficient and inefficient examples.\n\n*Example* Efficient operation.\n\n # r.table(\'games\').group(\'player\').type_of().run(conn)\n # Returns "GROUPED_STREAM"\n r.table(\'games\').group(\'player\').min(\'points\').run(conn) # EFFICIENT\n\n*Example* Inefficient operation.\n\n # r.table(\'games\').group(\'player\').order_by(\'score\').type_of().run(conn)\n # Returns "GROUPED_DATA"\n r.table(\'games\').group(\'player\').order_by(\'score\').nth(0).run(conn) # INEFFICIENT\n\nWhat does it mean to be inefficient here? 
When operating on grouped\ndata rather than a grouped stream, *all* of the data has to be\navailable on the node processing the query. This means that the\noperation will only use one server\'s resources, and will require\nmemory proportional to the size of the grouped data it\'s operating\non. (In the case of the [order_by](http://rethinkdb.com/api/python/order_by/) in the inefficient example, that\nmeans memory proportional **to the size of the table**.) The array\nlimit is also enforced for grouped data, so the `order_by` example\nwould fail for tables with more than 100,000 rows unless you used the `array_limit` option with `run`.\n\n*Example* What is the maximum number of points scored by each\nplayer in free games?\n\n > r.table(\'games\').filter(lambda game:\n game[\'type\'] = \'free\'\n ).group(\'player\').max(\'points\')[\'points\'].run(conn)\n \n {\n "Alice": 7,\n "Bob": 10\n }\n\n*Example* What is each player\'s highest even and odd score?\n\n > r.table(\'games\')\n .group(\'name\', lambda game:\n game[\'points\'] % 2\n ).max(\'points\')[\'points\'].run(conn)\n \n {\n ("Alice", 1): 7,\n ("Bob", 0): 10,\n ("Bob", 1): 15\n }\n'),
(rethinkdb.ast.RqlQuery.max, b"sequence.max(field | function) -> element\nsequence.max(index=<indexname>) -> element\n\nFinds the maximum element of a sequence. The `max` command can be called with:\n\n* a **field name**, to return the element of the sequence with the largest value in that field;\n* an **index** (the primary key or a secondary index), to return the element of the sequence with the largest value in that index;\n* a **function**, to apply the function to every element within the sequence and return the element which returns the largest value from the function, ignoring any elements where the function produces a non-existence error.\n\nFor more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nCalling `max` on an empty sequence will throw a non-existence error; this can be handled using the [default](http://rethinkdb.com/api/python/default/) command.\n\n*Example* Return the maximum value in the list `[3, 5, 7]`.\n\n r.expr([3, 5, 7]).max().run(conn)\n\n*Example* Return the user who has scored the most points.\n\n r.table('users').max('points').run(conn)\n\n*Example* The same as above, but using a secondary index on the `points` field.\n\n r.table('users').max(index='points').run(conn)\n\n*Example* Return the user who has scored the most points, adding in bonus points from a separate field using a function.\n\n r.table('users').max(lambda user:\n user['points'] + user['bonus_points']\n ).run(conn)\n\n*Example* Return the highest number of points any user has ever scored. This returns the value of that `points` field, not a document.\n\n r.table('users').max('points')['points'].run(conn)\n\n*Example* Return the user who has scored the most points, but add a default `None` return value to prevent an error if no user has ever scored points.\n\n r.table('users').max('points').default(None).run(conn)\n"),
(rethinkdb.ast.RqlQuery.min, b"sequence.min(field | function) -> element\nsequence.min(index=<indexname>) -> element\n\nFinds the minimum element of a sequence. The `min` command can be called with:\n\n* a **field name**, to return the element of the sequence with the smallest value in that field;\n* an **index** (the primary key or a secondary index), to return the element of the sequence with the smallest value in that index;\n* a **function**, to apply the function to every element within the sequence and return the element which returns the smallest value from the function, ignoring any elements where the function produces a non-existence error.\n\nFor more information on RethinkDB's sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nCalling `min` on an empty sequence will throw a non-existence error; this can be handled using the [default](http://rethinkdb.com/api/python/default/) command.\n\n*Example* Return the minimum value in the list `[3, 5, 7]`.\n\n r.expr([3, 5, 7]).min().run(conn)\n\n*Example* Return the user who has scored the fewest points.\n\n r.table('users').min('points').run(conn)\n\n*Example* The same as above, but using a secondary index on the `points` field.\n\n r.table('users').min(index='points').run(conn)\n\n*Example* Return the user who has scored the fewest points, adding in bonus points from a separate field using a function.\n\n r.table('users').min(lambda user:\n user['points'] + user['bonus_points']\n ).run(conn)\n\n*Example* Return the smallest number of points any user has ever scored. This returns the value of that `points` field, not a document.\n\n r.table('users').min('points')['points'].run(conn)\n\n*Example* Return the user who has scored the fewest points, but add a default `None` return value to prevent an error if no user has ever scored points.\n\n r.table('users').min('points').default(None).run(conn)\n"),
(rethinkdb.ast.RqlQuery.reduce, b'sequence.reduce(function) -> value\n\nProduce a single value from a sequence through repeated application of a reduction\nfunction. \nThe reduction function can be called on:\n\n- two elements of the sequence\n- one element of the sequence and one result of a previous reduction\n- two results of previous reductions\n\nThe reduction function can be called on the results of two previous reductions because the\n`reduce` command is distributed and parallelized across shards and CPU cores. A common\nmistaken when using the `reduce` command is to suppose that the reduction is executed\nfrom left to right. Read the [map-reduce in RethinkDB](http://rethinkdb.com/docs/map-reduce/) article to\nsee an example.\n\nIf the sequence is empty, the server will produce a `ReqlRuntimeError` that can be\ncaught with `default`. \nIf the sequence has only one element, the first element will be returned.\n\n*Example* Return the number of documents in the table `posts`.\n\n r.table("posts").map(lambda doc: 1)\n .reduce(lambda left, right: left+right)\n .default(0).run(conn)\n\nA shorter way to execute this query is to use [count](http://rethinkdb.com/api/python/count).\n\n*Example* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the number of comments for all posts.\n\n r.table("posts").map(lambda doc:\n doc["comments"].count()\n ).reduce(lambda left, right:\n left+right\n ).default(0).run(conn)\n\n*Example* Suppose that each `post` has a field `comments` that is an array of\ncomments. \nReturn the maximum number comments per post.\n\n r.table("posts").map(lambda doc:\n doc["comments"].count()\n ).reduce(lambda left, right:\n r.branch(\n left > right,\n left,\n right\n )\n ).default(0).run(conn)\n\nA shorter way to execute this query is to use [max](http://rethinkdb.com/api/python/max).\n'),
(rethinkdb.ast.RqlQuery.sum, b"sequence.sum([field | function]) -> number\n\nSums all the elements of a sequence. If called with a field name,\nsums all the values of that field in the sequence, skipping elements\nof the sequence that lack that field. If called with a function,\ncalls that function on every element of the sequence and sums the\nresults, skipping elements of the sequence where that function returns\n`None` or a non-existence error.\n\nReturns `0` when called on an empty sequence.\n\n*Example* What's 3 + 5 + 7?\n\n r.expr([3, 5, 7]).sum().run(conn)\n\n*Example* How many points have been scored across all games?\n\n r.table('games').sum('points').run(conn)\n\n*Example* How many points have been scored across all games,\ncounting bonus points?\n\n r.table('games').sum(lambda game:\n game['points'] + game['bonus_points']\n ).run(conn)\n"),
(rethinkdb.ast.RqlQuery.ungroup, b'grouped_stream.ungroup() -> array\ngrouped_data.ungroup() -> array\n\nTakes a grouped stream or grouped data and turns it into an array of\nobjects representing the groups. Any commands chained after `ungroup`\nwill operate on this array, rather than operating on each group\nindividually. This is useful if you want to e.g. order the groups by\nthe value of their reduction.\n\nThe format of the array returned by `ungroup` is the same as the\ndefault native format of grouped data in the JavaScript driver and\ndata explorer.\n\n*Example* What is the maximum number of points scored by each\nplayer, with the highest scorers first?\n\nSuppose that the table `games` has the following data:\n\n [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 5, "player": "Alice", "points": 7, "type": "free"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"},\n {"id": 12, "player": "Alice", "points": 2, "type": "free"}\n ]\n\nWe can use this query:\n\n r.table(\'games\')\n .group(\'player\').max(\'points\')[\'points\']\n .ungroup().order_by(r.desc(\'reduction\')).run(conn)\n\nResult: \n\n [\n {\n "group": "Bob",\n "reduction": 15\n },\n {\n "group": "Alice",\n "reduction": 7\n }\n ]\n\n*Example* Select one random player and all their games.\n\n r.table(\'games\').group(\'player\').ungroup().sample(1).run(conn)\n\nResult:\n\n [\n {\n "group": "Bob",\n "reduction": [\n {"id": 2, "player": "Bob", "points": 15, "type": "ranked"},\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n ]\n\nNote that if you didn\'t call `ungroup`, you would instead select one\nrandom game from each player:\n\n r.table(\'games\').group(\'player\').sample(1).run(conn)\n\nResult:\n\n {\n "Alice": [\n {"id": 5, "player": "Alice", "points": 7, "type": "free"}\n ],\n "Bob": [\n {"id": 11, "player": "Bob", "points": 10, "type": "free"}\n ]\n }\n\n*Example* Types!\n\n r.table(\'games\').group(\'player\').type_of().run(conn) # Returns 
"GROUPED_STREAM"\n r.table(\'games\').group(\'player\').ungroup().type_of().run(conn) # Returns "ARRAY"\n r.table(\'games\').group(\'player\').avg(\'points\').run(conn) # Returns "GROUPED_DATA"\n r.table(\'games\').group(\'player\').avg(\'points\').ungroup().run(conn) #Returns "ARRAY"\n'),
(rethinkdb.args, b"r.args(array) -> special\n\n`r.args` is a special term that's used to splice an array of arguments\ninto another term. This is useful when you want to call a variadic\nterm such as [get_all](http://rethinkdb.com/api/python/get_all/) with a set of arguments produced at runtime.\n\nThis is analogous to unpacking argument lists in Python.\n\n*Example* Get Alice and Bob from the table `people`.\n\n r.table('people').get_all('Alice', 'Bob').run(conn)\n # or\n r.table('people').get_all(r.args(['Alice', 'Bob'])).run(conn)\n\n*Example* Get all of Alice's children from the table `people`.\n\n # r.table('people').get('Alice') returns {'id': 'Alice', 'children': ['Bob', 'Carol']}\n r.table('people').get_all(r.args(r.table('people').get('Alice')['children'])).run(conn)\n"),
(rethinkdb.binary, b'r.binary(data) -> binary\n\nEncapsulate binary data within a query.\n\nThe type of data `binary` accepts depends on the client language. In Python, it expects a parameter of `bytes` type. Using a `bytes` object within a query implies the use of `binary` and the ReQL driver will automatically perform the coercion (in Python 3 only).\n\nBinary objects returned to the client in JavaScript will also be of the `bytes` type. This can be changed with the `binary_format` option provided to [run](http://rethinkdb.com/api/python/run) to return "raw" objects.\n\nOnly a limited subset of ReQL commands may be chained after `binary`:\n\n* [coerce_to](http://rethinkdb.com/api/python/coerce_to/) can coerce `binary` objects to `string` types\n* [count](http://rethinkdb.com/api/python/count/) will return the number of bytes in the object\n* [slice](http://rethinkdb.com/api/python/slice/) will treat bytes like array indexes (i.e., `slice(10,20)` will return bytes 10–19)\n* [type_of](http://rethinkdb.com/api/python/type_of) returns `PTYPE<BINARY>`\n* [info](http://rethinkdb.com/api/python/info) will return information on a binary object.\n\n*Example* Save an avatar image to a existing user record.\n\n f = open(\'./default_avatar.png\', \'rb\')\n avatar_image = f.read()\n f.close()\n r.table(\'users\').get(100).update({\'avatar\': r.binary(avatar_image)}).run(conn)\n\n*Example* Get the size of an existing avatar image.\n\n r.table(\'users\').get(100)[\'avatar\'].count().run(conn)\n \n 14156\n\nRead more details about RethinkDB\'s binary object support: [Storing binary objects](http://rethinkdb.com/docs/storing-binary/).\n'),
(rethinkdb.branch, b'r.branch(test, true_action[, test2, else_action, ...], false_action) -> any\n\nPerform a branching conditional equivalent to `if-then-else`.\n\nThe `branch` command takes 2n+1 arguments: pairs of conditional expressions and commands to be executed if the conditionals return any value but `False` or `None` (i.e., "truthy" values), with a final "else" command to be evaluated if all of the conditionals are `False` or `None`.\n\nr.branch(test1, val1, test2, val2, elseval)\n\nis the equivalent of the Python statement\n\n if test1:\n return val1\n elif test2:\n return val2\n else:\n return elseval\n\n*Example* Test the value of x.\n\n x = 10\n r.branch((x > 5), \'big\', \'small\').run(conn)\n \n > "big"\n\n*Example* Categorize heroes by victory counts.\n\n r.table(\'marvel\').map(\n r.branch(\n r.row[\'victories\'] > 100,\n r.row[\'name\'] + \' is a superhero\',\n r.row[\'victories\'] > 10,\n r.row[\'name\'] + \' is a hero\',\n r.row[\'name\'] + \' is very nice\'\n )\n ).run(conn)\n\nIf the documents in the table `marvel` are:\n\n [\n { "name": "Iron Man", "victories": 214 },\n { "name": "Jubilee", "victories": 49 },\n { "name": "Slava", "victories": 5 }\n ]\n\nThe results will be:\n\n [\n "Iron Man is a superhero",\n "Jubilee is a hero",\n "Slava is very nice"\n ]\n'),
(rethinkdb.ast.RqlQuery.coerce_to, b"sequence.coerce_to('array') -> array\nvalue.coerce_to('string') -> string\nstring.coerce_to('number') -> number\narray.coerce_to('object') -> object\nsequence.coerce_to('object') -> object\nobject.coerce_to('array') -> array\nbinary.coerce_to('string') -> string\nstring.coerce_to('binary') -> binary\n\nConvert a value of one type into another.\n\n* a sequence, selection or object can be coerced to an array\n* a sequence, selection or an array of key-value pairs can be coerced to an object\n* a string can be coerced to a number\n* any datum (single value) can be coerced to a string\n* a binary object can be coerced to a string and vice-versa\n\n*Example* Coerce a stream to an array to store its output in a field. (A stream cannot be stored in a field directly.)\n\n r.table('posts').map(lambda post: post.merge(\n { 'comments': r.table('comments').get_all(post['id'], index='post_id').coerce_to('array') }\n )).run(conn)\n\n*Example* Coerce an array of pairs into an object.\n\n r.expr([['name', 'Ironman'], ['victories', 2000]]).coerce_to('object').run(conn)\n\n__Note:__ To coerce a list of key-value pairs like `['name', 'Ironman', 'victories', 2000]` to an object, use the [object](http://rethinkdb.com/api/python/object) command.\n\n*Example* Coerce a number to a string.\n\n r.expr(1).coerce_to('string').run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.default, b'value.default(default_value | function) -> any\nsequence.default(default_value | function) -> any\n\nProvide a default value in case of non-existence errors. The `default` command evaluates its first argument (the value it\'s chained to). If that argument returns `None` or a non-existence error is thrown in evaluation, then `default` returns its second argument. The second argument is usually a default value, but it can be a function that returns a value.\n\n*Example* Retrieve the titles and authors of the table `posts`.\nIn the case where the author field is missing or `None`, we want to retrieve the string\n`Anonymous`.\n\n r.table("posts").map(lambda post:\n {\n "title": post["title"],\n "author": post["author"].default("Anonymous")\n }\n ).run(conn)\n\nWe can rewrite the previous query with `r.branch` too.\n\n r.table("posts").map(lambda post:\n r.branch(\n post.has_fields("author"),\n {\n "title": post["title"],\n "author": post["author"]\n },\n {\n "title": post["title"],\n "author": "Anonymous" \n }\n )\n ).run(conn)\n\n*Example* The `default` command can also be used to filter documents. Retrieve all our users who are not grown-ups or whose age is unknown\n(i.e., the field `age` is missing or equals `None`).\n\n r.table("users").filter(lambda user:\n (user["age"] < 18).default(True)\n ).run(conn)\n\nOne more way to write the previous query is to set the age to be `-1` when the\nfield is missing.\n\n r.table("users").filter(lambda user:\n user["age"].default(-1) < 18\n ).run(conn)\n\nThis can be accomplished with [has_fields](http://rethinkdb.com/api/python/has_fields/) rather than `default`.\n\n r.table("users").filter(lambda user:\n user.has_fields("age").not_() | (user["age"] < 18)\n ).run(conn)\n\nThe body of every [filter](http://rethinkdb.com/api/python/filter/) is wrapped in an implicit `.default(False)`. 
You can overwrite the value `False` with the `default` option.\n\n r.table("users").filter(\n lambda user: (user["age"] < 18).default(True),\n default=True\n ).run(conn)\n\n*Example* The function form of `default` receives the error message as its argument.\n\n r.table("posts").map(lambda post:\n {\n "title": post["title"],\n "author": post["author"].default(lambda err: err)\n }\n ).run(conn)\n\nThis particular example simply returns the error message, so it isn\'t very useful. But it would be possible to change the default value based on the specific error message thrown.\n'),
(rethinkdb.ast.RqlQuery.do, b"any.do(function) -> any\nr.do([args]*, function) -> any\nany.do(expr) -> any\nr.do([args]*, expr) -> any\n\nCall an anonymous function using return values from other ReQL commands or queries as arguments.\n\nThe last argument to `do` (or, in some forms, the only argument) is an expression or an anonymous function which receives values from either the previous arguments or from prefixed commands chained before `do`. The `do` command is essentially a single-element [map](http://rethinkdb.com/api/python/map/), letting you map a function over just one document. This allows you to bind a query result to a local variable within the scope of `do`, letting you compute the result just once and reuse it in a complex expression or in a series of ReQL commands.\n\nArguments passed to the `do` function must be basic data types, and cannot be streams or selections. (Read about [ReQL data types](http://rethinkdb.com/docs/data-types/).) While the arguments will all be evaluated before the function is executed, they may be evaluated in any order, so their values should not be dependent on one another. The type of `do`'s result is the type of the value returned from the function or last expression.\n\n*Example* Compute a golfer's net score for a game.\n\n r.table('players').get('86be93eb-a112-48f5-a829-15b2cb49de1d').do(\n lambda player: player['gross_score'] - player['course_handicap']\n ).run(conn)\n\n*Example* Return the name of the best scoring player in a two-player golf match.\n\n r.do(r.table('players').get(id1), r.table('players').get(id2),\n (lambda player1, player2:\n r.branch(player1['gross_score'].lt(player2['gross_score']),\n player1, player2))\n ).run(conn)\n\nNote that `branch`, the ReQL conditional command, must be used instead of `if`. 
See the `branch` [documentation](http://rethinkdb.com/api/python/branch) for more.\n\n*Example* Take different actions based on the result of a ReQL [insert](http://rethinkdb.com/api/python/insert) command.\n\n new_data = {\n 'id': 100,\n 'name': 'Agatha',\n 'gross_score': 57,\n 'course_handicap': 4\n }\n r.table('players').insert(new_data).do(lambda doc:\n r.branch((doc['inserted'] != 0),\n r.table('log').insert({'time': r.now(), 'response': doc, 'result': 'ok'}),\n r.table('log').insert({'time': r.now(), 'response': doc, 'result': 'error'}))\n ).run(conn)\n"),
(rethinkdb.error, b"r.error(message) -> error\n\nThrow a runtime error. If called with no arguments inside the second argument to `default`, re-throw the current error.\n\n*Example* Iron Man can't possibly have lost a battle:\n\n r.table('marvel').get('IronMan').do(\n lambda ironman: r.branch(ironman['victories'] < ironman['battles'],\n r.error('impossible code path'),\n ironman)\n ).run(conn)\n\n"),
(rethinkdb.expr, b"r.expr(value) -> value\n\nConstruct a ReQL JSON object from a native object.\n\nIf the native object is of the `bytes` type, then `expr` will return a binary object. See [binary](http://rethinkdb.com/api/python/binary) for more information.\n\n*Example* Objects wrapped with expr can then be manipulated by ReQL API functions.\n\n r.expr({'a':'b'}).merge({'b':[1,2,3]}).run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.for_each, b"sequence.for_each(write_function) -> object\n\nLoop over a sequence, evaluating the given write query for each element.\n\n*Example* Now that our heroes have defeated their villains, we can safely remove them from the villain table.\n\n r.table('marvel').for_each(\n lambda hero: r.table('villains').get(hero['villainDefeated']).delete()\n ).run(conn)\n\n"),
(rethinkdb.http, b'r.http(url[, options]) -> value\nr.http(url[, options]) -> stream\n\nRetrieve data from the specified URL over HTTP. The return type depends on the `result_format` option, which checks the `Content-Type` of the response by default.\n\n*Example* Perform an HTTP `GET` and store the result in a table.\n\n r.table(\'posts\').insert(r.http(\'http://httpbin.org/get\')).run(conn)\n\nSee [the tutorial](http://rethinkdb.com/docs/external-api-access/) on `r.http` for more examples on how to use this command.\n\n* `timeout`: timeout period in seconds to wait before aborting the connect (default `30`).\n* `reattempts`: number of retry attempts to make after failed connections (default `5`).\n* `redirects`: number of redirect and location headers to follow (default `1`).\n* `verify`: if `true`, verify the server\'s SSL certificate (default `true`).\n* `result_format`: string specifying the format to return results in. One of the following:\n * `text`: always return a string.\n * `json`: parse the result as JSON, raising an error on failure.\n * `jsonp`: parse the result as Padded JSON.\n * `binary`: return a binary object.\n * `auto`: parse the result based on its `Content-Type` (the default):\n * `application/json`: as `json`\n * `application/json-p`, `text/json-p`, `text/javascript`: as `jsonp`\n * `audio/*`, `video/*`, `image/*`, `application/octet-stream`: as `binary`\n * anything else: as `text`\n\n* `method`: HTTP method to use for the request. One of `GET`, `POST`, `PUT`, `PATCH`, `DELETE` or `HEAD`. Default: `GET`.\n* `auth`: object giving authentication, with the following fields:\n * `type`: `basic` (default) or `digest`\n * `user`: username\n * `pass`: password in plain text\n* `params`: object specifying URL parameters to append to the URL as encoded key/value pairs. `{ \'query\': \'banana\', \'limit\': 2 }` will be appended as `?query=banana&limit=2`. Default: no parameters.\n* `header`: Extra header lines to include. 
The value may be an array of strings or an object. Default: `Accept-Encoding: deflate;q=1, gzip;q=0.5` and `User-Agent: RethinkDB/<VERSION>`.\n* `data`: Data to send to the server on a `POST`, `PUT`, `PATCH`, or `DELETE` request. For `POST` requests, data may be either an object (which will be written to the body as form-encoded key/value pairs) or a string; for all other requests, data will be serialized as JSON and placed in the request body, sent as `Content-Type: application/json`. Default: no data will be sent.\n\n*Example* Perform multiple requests with different parameters.\n\n r.expr([1, 2, 3]).map(\n lambda i: r.http(\'http://httpbin.org/get\', params={\'user\': i})\n ).run(conn)\n\n*Example* Perform a `PUT` request for each item in a table.\n\n r.table(\'data\').map(\n lambda row: r.http(\'http://httpbin.org/put\', method=\'PUT\', data=row)\n ).run(conn)\n\n*Example* Perform a `POST` request with accompanying data.\n\nUsing form-encoded data:\n\n r.http(\'http://httpbin.org/post\', method=\'POST\',\n data={\'player\': \'Bob\', \'game\': \'tic tac toe\'}\n ).run(conn)\n\nUsing JSON data:\n\n r.http(\'http://httpbin.org/post\', method=\'POST\',\n data=r.expr(value).coerce_to(\'string\'),\n header={\'Content-Type\': \'application/json\'}\n ).run(conn)\n\n`r.http` supports depagination, which will request multiple pages in a row and aggregate the results into a stream. The use of this feature is controlled by the optional arguments `page` and `page_limit`. Either none or both of these arguments must be provided.\n\n* `page`: This option may specify either a built-in pagination strategy (see below), or a function to provide the next URL and/or `params` to request.\n* `page_limit`: An integer specifying the maximum number of requests to issue using the `page` functionality. 
This is to prevent overuse of API quotas, and must be specified with `page`.\n * `-1`: no limit\n * `0`: no requests will be made, an empty stream will be returned\n * `n`: `n` requests will be made\n\nAt the moment, the only built-in strategy is `\'link-next\'`, which is equivalent to `lambda info: info\'header\'[\'rel="next"\'].default(None)`.\n\n*Example* Perform a GitHub search and collect up to 3 pages of results.\n\n r.http("https://api.github.com/search/code?q=addClass+user:mozilla",\n page=\'link-next\', page_limit=3).run(conn)\n\nAs a function, `page` takes one parameter, an object of the format:\n\n {\n \'params\': object, # the URL parameters used in the last request\n \'header\': object, # the HTTP headers of the last response as key/value pairs\n \'body\': value # the body of the last response in the format specified by `result_format`\n }\n\nThe `header` field will be a parsed version of the header with fields lowercased, like so:\n\n {\n \'content-length\': \'1024\',\n \'content-type\': \'application/json\',\n \'date\': \'Thu, 1 Jan 1970 00:00:00 GMT\',\n \'link\': {\n \'rel="last"\': \'http://example.com/?page=34\',\n \'rel="next"\': \'http://example.com/?page=2\'\n }\n }\n\nThe `page` function may return a string corresponding to the next URL to request, `None` indicating that there is no more to get, or an object of the format:\n\n {\n \'url\': string, # the next URL to request, or None for no more pages\n \'params\': object # new URL parameters to use, will be merged with the previous request\'s params\n }\n\n*Example* Perform depagination with a custom `page` function.\n\n r.http(\'example.com/pages\',\n page=(lambda info: info[\'body\'][\'meta\'][\'next\'].default(None)),\n page_limit=5\n ).run(conn)\n\n# Learn more\n\nSee [the tutorial](http://rethinkdb.com/docs/external-api-access/) on `r.http` for more examples on how to use this command.\n'),
(rethinkdb.ast.RqlQuery.info, b"any.info() -> object\nr.info(any) -> object\n\nGet information about a ReQL value.\n\n*Example* Get information about a table such as primary key, or cache size.\n\n r.table('marvel').info().run(conn)\n\n"),
(rethinkdb.js, b'r.js(js_string[, timeout=<number>]) -> value\n\nCreate a javascript expression.\n\n*Example* Concatenate two strings using JavaScript.\n\n`timeout` is the number of seconds before `r.js` times out. The default value is 5 seconds.\n\n r.js("\'str1\' + \'str2\'").run(conn)\n\n*Example* Select all documents where the \'magazines\' field is greater than 5 by running JavaScript on the server.\n\n r.table(\'marvel\').filter(\n r.js(\'(function (row) { return row.magazines.length > 5; })\')\n ).run(conn)\n\n*Example* You may also specify a timeout in seconds (defaults to 5).\n\n r.js(\'while(true) {}\', timeout=1.3).run(conn)\n\n'),
(rethinkdb.json, b'r.json(json_string) -> value\n\nParse a JSON string on the server.\n\n*Example* Send an array to the server\'\n\n r.json("[1,2,3]").run(conn)\n\n'),
(rethinkdb.range, b'r.range() -> stream\nr.range([start_value, ]end_value) -> stream\n\nGenerate a stream of sequential integers in a specified range. `range` takes 0, 1 or 2 arguments:\n\n* With no arguments, `range` returns an "infinite" stream from 0 up to and including the maximum integer value;\n* With one argument, `range` returns a stream from 0 up to but not including the end value;\n* With two arguments, `range` returns a stream from the start value up to but not including the end value.\n\nNote that the left bound (including the implied left bound of 0 in the 0- and 1-argument form) is always closed and the right bound is always open: the start value will always be included in the returned range and the end value will *not* be included in the returned range.\n\nAny specified arguments must be integers, or a `ReqlRuntimeError` will be thrown. If the start value is equal or to higher than the end value, no error will be thrown but a zero-element stream will be returned.\n\n*Example* Return a four-element range of `[0, 1, 2, 3]`.\n\n > r.range(4).run(conn)\n \n [0, 1, 2, 3]\n\nYou can also use the [limit](http://rethinkdb.com/api/python/limit) command with the no-argument variant to achieve the same result in this case:\n\n > r.range().limit(4).run(conn)\n \n [0, 1, 2, 3]\n\n*Example* Return a range from -5 through 5.\n\n > r.range(-5, 6).run(conn)\n \n [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]\n'),
(rethinkdb.ast.RqlQuery.to_json_string, b'value.to_json_string() -> string\nvalue.to_json() -> string\n\nConvert a ReQL value or object to a JSON string. You may use either `to_json_string` or `to_json`.\n\n*Example* Get a ReQL document as a JSON string.\n\n > r.table(\'hero\').get(1).to_json()\n \n \'{"id": 1, "name": "Batman", "city": "Gotham", "powers": ["martial arts", "cinematic entrances"]}\'\n'),
(rethinkdb.ast.RqlQuery.to_json, b'value.to_json_string() -> string\nvalue.to_json() -> string\n\nConvert a ReQL value or object to a JSON string. You may use either `to_json_string` or `to_json`.\n\n*Example* Get a ReQL document as a JSON string.\n\n > r.table(\'hero\').get(1).to_json()\n \n \'{"id": 1, "name": "Batman", "city": "Gotham", "powers": ["martial arts", "cinematic entrances"]}\'\n'),
(rethinkdb.ast.RqlQuery.type_of, b'any.type_of() -> string\n\nGets the type of a value.\n\n*Example* Get the type of a string.\n\n r.expr("foo").type_of().run(conn)\n\n'),
(rethinkdb.uuid, b'r.uuid() -> string\n\nReturn a UUID (universally unique identifier), a string that can be used as a unique ID.\n\n*Example* Generate a UUID.\n\n > r.uuid().run(conn)\n \n "27961a0e-f4e8-4eb3-bf95-c5203e1d87b9"\n'),
(rethinkdb.net.Cursor.close, b'cursor.close()\n\nClose a cursor. Closing a cursor cancels the corresponding query and frees the memory\nassociated with the open request.\n\n*Example* Close a cursor.\n\n cursor.close()\n'),
(rethinkdb.net.Cursor.next, b"cursor.next([wait=True])\n\nGet the next element in the cursor.\n\nThe optional `wait` argument specifies whether to wait for the next available element and how long to wait:\n\n* `True`: Wait indefinitely (the default).\n* `False`: Do not wait at all. If data is immediately available, it will be returned; if it is not available, a `ReqlTimeoutError` will be raised.\n* number: Wait up to the specified number of seconds for data to be available before raising `ReqlTimeoutError`.\n\nThe behavior of `next` will be identical with `False`, `None` or the number `0`.\n\nCalling `next` the first time on a cursor provides the first element of the cursor. If the data set is exhausted (e.g., you have retrieved all the documents in a table), a `ReqlCursorEmpty` error will be raised when `next` is called.\n\n*Example* Retrieve the next element.\n\n cursor = r.table('superheroes').run(conn)\n doc = cursor.next()\n\n*Example* Retrieve the next element on a [changefeed](http://rethinkdb.com/docs/changefeeds/python), waiting up to five seconds.\n\n cursor = r.table('superheroes').changes().run(conn)\n doc = cursor.next(wait=5)\n\n__Note:__ RethinkDB sequences can be iterated through via the Python Iterable interface. The canonical way to retrieve all the results is to use a [for...in](../each/) loop or [list()](../to_array/).\n\n"),
(rethinkdb.ast.RqlQuery.date, b'time.date() -> time\n\nReturn a new time object only based on the day, month and year (ie. the same day at 00:00).\n\n*Example* Retrieve all the users whose birthday is today\n\n r.table("users").filter(lambda user:\n user["birthdate"].date() == r.now().date()\n ).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.day, b'time.day() -> number\n\nReturn the day of a time object as a number between 1 and 31.\n\n*Example* Return the users born on the 24th of any month.\n\n r.table("users").filter(\n r.row["birthdate"].day() == 24\n )\n\n'),
(rethinkdb.ast.RqlQuery.day_of_week, b'time.day_of_week() -> number\n\nReturn the day of week of a time object as a number between 1 and 7 (following ISO 8601 standard). For your convenience, the terms r.monday, r.tuesday etc. are defined and map to the appropriate integer.\n\n*Example* Return today\'s day of week.\n\n r.now().day_of_week().run(conn)\n\n*Example* Retrieve all the users who were born on a Tuesday.\n\n r.table("users").filter( lambda user:\n user["birthdate"].day_of_week().eq(r.tuesday)\n )\n\n'),
(rethinkdb.ast.RqlQuery.day_of_year, b'time.day_of_year() -> number\n\nReturn the day of the year of a time object as a number between 1 and 366 (following ISO 8601 standard).\n\n*Example* Retrieve all the users who were born the first day of a year.\n\n r.table("users").filter(\n r.row["birthdate"].day_of_year() == 1\n ).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.during, b'time.during(start_time, end_time[, left_bound="closed", right_bound="open"])\n -> bool\n\nReturn whether a time is between two other times. By default, this is inclusive of the start time and exclusive of the end time. Set `left_bound` and `right_bound` to explicitly include (`closed`) or exclude (`open`) that endpoint of the range.\n\n*Example* Retrieve all the posts that were posted between December 1st, 2013 (inclusive) and December 10th, 2013 (exclusive).\n\n r.table("posts").filter(\n r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"))\n ).run(conn)\n\n*Example* Retrieve all the posts that were posted between December 1st, 2013 (exclusive) and December 10th, 2013 (inclusive).\n\n r.table("posts").filter(\n r.row[\'date\'].during(r.time(2013, 12, 1, "Z"), r.time(2013, 12, 10, "Z"), left_bound="open", right_bound="closed")\n ).run(conn)\n\n'),
(rethinkdb.epoch_time, b'r.epoch_time(epoch_time) -> time\n\nCreate a time object based on seconds since epoch. The first argument is a double and\nwill be rounded to three decimal places (millisecond-precision).\n\n*Example* Update the birthdate of the user "John" to November 3rd, 1986.\n\n r.table("user").get("John").update({"birthdate": r.epoch_time(531360000)}).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.hours, b'time.hours() -> number\n\nReturn the hour in a time object as a number between 0 and 23.\n\n*Example* Return all the posts submitted after midnight and before 4am.\n\n r.table("posts").filter(lambda post:\n post["date"].hours() < 4\n ).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.in_timezone, b"time.in_timezone(timezone) -> time\n\nReturn a new time object with a different timezone. While the time stays the same, the results returned by methods such as hours() will change since they take the timezone into account. The timezone argument has to be of the ISO 8601 format.\n\n*Example* Hour of the day in San Francisco (UTC/GMT -8, without daylight saving time).\n\n r.now().in_timezone('-08:00').hours().run(conn)\n"),
(rethinkdb.iso8601, b'r.iso8601(iso8601Date[, default_timezone=\'\']) -> time\n\nCreate a time object based on an ISO 8601 date-time string (e.g. \'2013-01-01T01:01:01+00:00\'). We support all valid ISO 8601 formats except for week dates. If you pass an ISO 8601 date-time without a time zone, you must specify the time zone with the `default_timezone` argument. Read more about the ISO 8601 format at [Wikipedia](http://en.wikipedia.org/wiki/ISO_8601).\n\n*Example* Update the time of John\'s birth.\n\n r.table("user").get("John").update({"birth": r.iso8601(\'1986-11-03T08:30:00-07:00\')}).run(conn)\n'),
(rethinkdb.ast.RqlQuery.minutes, b'time.minutes() -> number\n\nReturn the minute in a time object as a number between 0 and 59.\n\n*Example* Return all the posts submitted during the first 10 minutes of every hour.\n\n r.table("posts").filter(lambda post:\n post["date"].minutes() < 10\n ).run(conn)\n'),
(rethinkdb.ast.RqlQuery.month, b'time.month() -> number\n\nReturn the month of a time object as a number between 1 and 12. For your convenience, the terms r.january, r.february etc. are defined and map to the appropriate integer.\n\n*Example* Retrieve all the users who were born in November.\n\n r.table("users").filter(\n r.row["birthdate"].month() == 11\n )\n\n*Example* Retrieve all the users who were born in November.\n\n r.table("users").filter(\n r.row["birthdate"].month() == r.november\n )\n\n'),
(rethinkdb.now, b'r.now() -> time\n\nReturn a time object representing the current time in UTC. The command now() is computed once when the server receives the query, so multiple instances of r.now() will always return the same time inside a query.\n\n*Example* Add a new user with the time at which he subscribed.\n\n r.table("users").insert({\n "name": "John",\n "subscription_date": r.now()\n }).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.seconds, b'time.seconds() -> number\n\nReturn the seconds in a time object as a number between 0 and 59.999 (double precision).\n\n*Example* Return the post submitted during the first 30 seconds of every minute.\n\n r.table("posts").filter(lambda post:\n post["date"].seconds() < 30\n ).run(conn)\n\n'),
(rethinkdb.time, b'r.time(year, month, day[, hour, minute, second], timezone)\n -> time\n\nCreate a time object for a specific time.\n\nA few restrictions exist on the arguments:\n\n- `year` is an integer between 1400 and 9,999.\n- `month` is an integer between 1 and 12.\n- `day` is an integer between 1 and 31.\n- `hour` is an integer.\n- `minutes` is an integer.\n- `seconds` is a double. Its value will be rounded to three decimal places\n(millisecond-precision).\n- `timezone` can be `\'Z\'` (for UTC) or a string with the format `\xc2\xb1[hh]:[mm]`.\n\n*Example* Update the birthdate of the user "John" to November 3rd, 1986 UTC.\n\n r.table("user").get("John").update({"birthdate": r.time(1986, 11, 3, \'Z\')}).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.time_of_day, b'time.time_of_day() -> number\n\nReturn the number of seconds elapsed since the beginning of the day stored in the time object.\n\n*Example* Retrieve posts that were submitted before noon.\n\n r.table("posts").filter(\n r.row["date"].time_of_day() <= 12*60*60\n ).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.timezone, b'time.timezone() -> string\n\nReturn the timezone of the time object.\n\n*Example* Return all the users in the "-07:00" timezone.\n\n r.table("users").filter(lambda user:\n user["subscriptionDate"].timezone() == "-07:00"\n )\n\n'),
(rethinkdb.ast.RqlQuery.to_epoch_time, b'time.to_epoch_time() -> number\n\nConvert a time object to its epoch time.\n\n*Example* Return the current time in seconds since the Unix Epoch with millisecond-precision.\n\n r.now().to_epoch_time()\n\n'),
(rethinkdb.ast.RqlQuery.to_iso8601, b'time.to_iso8601() -> string\n\nConvert a time object to a string in ISO 8601 format.\n\n*Example* Return the current ISO 8601 time.\n\n > r.now().to_iso8601().run(conn)\n \n "2015-04-20T18:37:52.690+00:00"\n\n'),
(rethinkdb.ast.RqlQuery.year, b'time.year() -> number\n\nReturn the year of a time object.\n\n*Example* Retrieve all the users born in 1986.\n\n r.table("users").filter(lambda user:\n user["birthdate"].year() == 1986\n ).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.append, b"array.append(value) -> array\n\nAppend a value to an array.\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].append('newBoots').run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.__getitem__, b"sequence[attr] -> sequence\nsingleSelection[attr] -> value\nobject[attr] -> value\narray[index] -> value\n\nGet a single field from an object. If called on a sequence, gets that field from every object in the sequence, skipping objects that lack it.\n\n*Example* What was Iron Man's first appearance in a comic?\n\n r.table('marvel').get('IronMan')['firstAppearance'].run(conn)\n\nThe `[]` command also accepts integer arguments as array offsets, like the [nth](http://rethinkdb.com/api/python/nth) command.\n\n*Example* Get the fourth element in a sequence. (The first element is position `0`, so the fourth element is position `3`.)\n\n r.expr([10, 20, 30, 40, 50])[3]\n \n 40\n"),
(rethinkdb.ast.RqlQuery.change_at, b'array.change_at(index, value) -> array\n\nChange a value in an array at a given index. Returns the modified array.\n\n*Example* Bruce Banner hulks out.\n\n r.expr(["Iron Man", "Bruce", "Spider-Man"]).change_at(1, "Hulk").run(conn)\n'),
(rethinkdb.ast.RqlQuery.delete_at, b"array.delete_at(index [,endIndex]) -> array\n\nRemove one or more elements from an array at a given index. Returns the modified array. (Note: `delete_at` operates on arrays, not documents; to delete documents, see the [delete](http://rethinkdb.com/api/python/delete) command.)\n\nIf only `index` is specified, `delete_at` removes the element at that index. If both `index` and `end_index` are specified, `delete_at` removes the range of elements between `index` and `end_index`, inclusive of `index` but not inclusive of `end_index`.\n\nIf `end_index` is specified, it must not be less than `index`. Both `index` and `end_index` must be within the array's bounds (i.e., if the array has 10 elements, an `index` or `end_index` of 10 or higher is invalid).\n\nBy using a negative `index` you can delete from the end of the array. `-1` is the last element in the array, `-2` is the second-to-last element, and so on. You may specify a negative `end_index`, although just as with a positive value, this will not be inclusive. 
The range `(2,-1)` specifies the third element through the next-to-last element.\n\n*Example* Delete the second element of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(1).run(conn)\n \n ['a', 'c', 'd', 'e', 'f']\n\n*Example* Delete the second and third elements of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(1,3).run(conn)\n \n ['a', 'd', 'e', 'f']\n\n*Example* Delete the next-to-last element of an array.\n\n > r.expr(['a','b','c','d','e','f']).delete_at(-2).run(conn)\n \n ['a', 'b', 'c', 'd', 'f']\n\n*Example* Delete a comment on a post.\n\nGiven a post document such as:\n\n{\n id: '4cf47834-b6f9-438f-9dec-74087e84eb63',\n title: 'Post title',\n author: 'Bob',\n comments: [\n { author: 'Agatha', text: 'Comment 1' },\n { author: 'Fred', text: 'Comment 2' }\n ]\n}\n\nThe second comment can be deleted by using `update` and `delete_at` together.\n\n r.table('posts').get('4cf47834-b6f9-438f-9dec-74087e84eb63').update(\n lambda post: { 'comments': post['comments'].delete_at(1) }\n ).run(conn)\n"),
(rethinkdb.ast.RqlQuery.difference, b"array.difference(array) -> array\n\nRemove the elements of one array from another array.\n\n*Example* Retrieve Iron Man's equipment list without boots.\n\n r.table('marvel').get('IronMan')['equipment'].difference(['Boots']).run(conn)\n\n*Example* Remove Iron Man's boots from his equipment.\n\n r.table('marvel').get('IronMan')[:equipment].update(lambda doc:\n {'equipment': doc['equipment'].difference(['Boots'])}\n ).run(conn)\n"),
(rethinkdb.ast.RqlQuery.get_field, b"sequence.get_field(attr) -> sequence\nsingleSelection.get_field(attr) -> value\nobject.get_field(attr) -> value\n\nGet a single field from an object. If called on a sequence, gets that field from every\nobject in the sequence, skipping objects that lack it.\n\n*Example* What was Iron Man's first appearance in a comic?\n\n r.table('marvel').get('IronMan').get_field('firstAppearance').run(conn)\n"),
(rethinkdb.ast.RqlQuery.has_fields, b'sequence.has_fields([selector1, selector2...]) -> stream\narray.has_fields([selector1, selector2...]) -> array\nobject.has_fields([selector1, selector2...]) -> boolean\n\nTest if an object has one or more fields. An object has a field if it has that key and the key has a non-null value. For instance, the object `{\'a\': 1,\'b\': 2,\'c\': null}` has the fields `a` and `b`.\n\nWhen applied to a single object, `has_fields` returns `true` if the object has the fields and `false` if it does not. When applied to a sequence, it will return a new sequence (an array or stream) containing the elements that have the specified fields.\n\n*Example* Return the players who have won games.\n\n r.table(\'players\').has_fields(\'games_won\').run(conn)\n\n*Example* Return the players who have *not* won games. To do this, use `has_fields` with [not](http://rethinkdb.com/api/python/not), wrapped with [filter](http://rethinkdb.com/api/python/filter).\n\n r.table(\'players\').filter(~r.row.has_fields(\'games_won\')).run(conn)\n\n*Example* Test if a specific player has won any games.\n\n r.table(\'players\').get(\n \'b5ec9714-837e-400c-aa74-dbd35c9a7c4c\').has_fields(\'games_won\').run(conn)\n\n**Nested Fields**\n\n`has_fields` lets you test for nested fields in objects. If the value of a field is itself a set of key/value pairs, you can test for the presence of specific keys.\n\n*Example* In the `players` table, the `games_won` field contains one or more fields for kinds of games won:\n\n {\n \'games_won\': {\n \'playoffs\': 2,\n \'championships\': 1\n }\n }\n\nReturn players who have the "championships" field.\n\n r.table(\'players\').has_fields({\'games_won\': {\'championships\': true}}).run(conn)\n\nNote that `true` in the example above is testing for the existence of `championships` as a field, not testing to see if the value of the `championships` field is set to `true`. There\'s a more convenient shorthand form available. 
(See [pluck](http://rethinkdb.com/api/python/pluck) for more details on this.)\n\n r.table(\'players\').has_fields({\'games_won\': \'championships\'}).run(conn)\n'),
(rethinkdb.ast.RqlQuery.insert_at, b'array.insert_at(index, value) -> array\n\nInsert a value in to an array at a given index. Returns the modified array.\n\n*Example* Hulk decides to join the avengers.\n\n r.expr(["Iron Man", "Spider-Man"]).insert_at(1, "Hulk").run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.keys, b'singleSelection.keys() -> array\nobject.keys() -> array\n\nReturn an array containing all of an object\'s keys. Note that the keys will be sorted as described in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order) (for strings, lexicographically).\n\n*Example* Get all the keys from a table row.\n\n # row: { "id": 1, "mail": "fred@example.com", "name": "fred" }\n \n r.table(\'users\').get(1).keys().run(conn)\n \n > [ "id", "mail", "name" ]\n'),
(rethinkdb.literal, b'r.literal(object) -> special\n\nReplace an object in a field instead of merging it with an existing object in a `merge` or `update` operation. = Using `literal` with no arguments in a `merge` or `update` operation will remove the corresponding field.\n\n*Example* Replace one nested document with another rather than merging the fields.\n\nAssume your users table has this structure:\n\n [\n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 18,\n "city": "Dallas"\n }\n } \n ...\n ]\n\nUsing `update` to modify the `data` field will normally merge the nested documents:\n\n r.table(\'users\').get(1).update({ \'data\': { \'age\': 19, \'job\': \'Engineer\' } }).run(conn)\n \n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 19,\n "city": "Dallas",\n "job": "Engineer"\n }\n } \n\nThat will preserve `city` and other existing fields. But to replace the entire `data` document with a new object, use `literal`:\n\n r.table(\'users\').get(1).update({ \'data\': r.literal({ \'age\': 19, \'job\': \'Engineer\' }) }).run(conn)\n \n {\n "id": 1,\n "name": "Alice",\n "data": {\n "age": 19,\n "job": "Engineer"\n }\n } \n\n*Example* Use `literal` to remove a field from a document.\n\n r.table(\'users\').get(1).merge({ "data": r.literal() }).run(conn)\n \n {\n "id": 1,\n "name": "Alice"\n }\n'),
(rethinkdb.ast.RqlQuery.merge, b'singleSelection.merge([object | function, object | function, ...]) -> object\nobject.merge([object | function, object | function, ...]) -> object\nsequence.merge([object | function, object | function, ...]) -> stream\narray.merge([object | function, object | function, ...]) -> array\n\nMerge two or more objects together to construct a new object with properties from all. When there is a conflict between field names, preference is given to fields in the rightmost object in the argument list `merge` also accepts a subquery function that returns an object, which will be used similarly to a [map](http://rethinkdb.com/api/python/map/) function.\n\n*Example* Equip Thor for battle.\n\n r.table(\'marvel\').get(\'thor\').merge(\n r.table(\'equipment\').get(\'hammer\'),\n r.table(\'equipment\').get(\'pimento_sandwich\')\n ).run(conn)\n\n*Example* Equip every hero for battle, using a subquery function to retrieve their weapons.\n\n r.table(\'marvel\').merge(lambda hero:\n { \'weapons\': r.table(\'weapons\').get(hero[\'weapon_id\']) }\n ).run(conn)\n\n*Example* Use `merge` to join each blog post with its comments.\n\nNote that the sequence being merged—in this example, the comments—must be coerced from a selection to an array. 
Without `coerce_to` the operation will throw an error ("Expected type DATUM but found SELECTION").\n\n r.table(\'posts\').merge(lambda post:\n { \'comments\': r.table(\'comments\').get_all(post[\'id\'],\n index=\'post_id\').coerce_to(\'array\') }\n ).run(conn)\n\n*Example* Merge can be used recursively to modify object within objects.\n\n r.expr({\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10, \'cooldown\' : 20}}}).merge(\n {\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10}}}\n ).run(conn)\n\n*Example* To replace a nested object with another object you can use the literal keyword.\n\n r.expr({\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10, \'cooldown\' : 20}}}).merge(\n {\'weapons\' : r.literal({\'repulsor rays\' : {\'dmg\' : 3, \'cooldown\' : 0}})}\n ).run(conn)\n\n*Example* Literal can be used to remove keys from an object as well.\n\n r.expr({\'weapons\' : {\'spectacular graviton beam\' : {\'dmg\' : 10, \'cooldown\' : 20}}}).merge(\n {\'weapons\' : {\'spectacular graviton beam\' : r.literal()}}\n ).run(conn)\n\n'),
(rethinkdb.object, b'r.object([key, value,]...) -> object\n\nCreates an object from a list of key-value pairs, where the keys must\nbe strings. `r.object(A, B, C, D)` is equivalent to\n`r.expr([[A, B], [C, D]]).coerce_to(\'OBJECT\')`.\n\n*Example* Create a simple object.\n\n > r.object(\'id\', 5, \'data\', [\'foo\', \'bar\']).run(conn)\n {\'data\': ["foo", "bar"], \'id\': 5}\n'),
(rethinkdb.ast.RqlQuery.pluck, b"sequence.pluck([selector1, selector2...]) -> stream\narray.pluck([selector1, selector2...]) -> array\nobject.pluck([selector1, selector2...]) -> object\nsingleSelection.pluck([selector1, selector2...]) -> object\n\nPlucks out one or more attributes from either an object or a sequence of objects\n(projection).\n\n*Example* We just need information about IronMan's reactor and not the rest of the\ndocument.\n\n r.table('marvel').get('IronMan').pluck('reactorState', 'reactorPower').run(conn)\n\n*Example* For the hero beauty contest we only care about certain qualities.\n\n r.table('marvel').pluck('beauty', 'muscleTone', 'charm').run(conn)\n\n*Example* Pluck can also be used on nested objects.\n\n r.table('marvel').pluck({'abilities' : {'damage' : True, 'mana_cost' : True}, 'weapons' : True}).run(conn)\n\n*Example* The nested syntax can quickly become overly verbose so there's a shorthand\nfor it.\n\n r.table('marvel').pluck({'abilities' : ['damage', 'mana_cost']}, 'weapons').run(conn)\n\nFor more information read the [nested field documentation](http://rethinkdb.com/docs/nested-fields/).\n"),
(rethinkdb.ast.RqlQuery.prepend, b"array.prepend(value) -> array\n\nPrepend a value to an array.\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].prepend('newBoots').run(conn)\n"),
(rethinkdb.row, b"r.row -> value\n\nReturns the currently visited document. Note that `row` does not work within subqueries to access nested documents; you should use anonymous functions to access those documents instead. (See the last example.)\n\n*Example* Get all users whose age is greater than 5.\n\n r.table('users').filter(r.row['age'] > 5).run(conn)\n\n*Example* Access the attribute 'child' of an embedded document.\n\n r.table('users').filter(r.row['embedded_doc']['child'] > 5).run(conn)\n\n*Example* Add 1 to every element of an array.\n\n r.expr([1, 2, 3]).map(r.row + 1).run(conn)\n\n*Example* For nested queries, use functions instead of `row`.\n\n r.table('users').filter(\n lambda doc: doc['name'] == r.table('prizes').get('winner')\n ).run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.set_difference, b"array.set_difference(array) -> array\n\nRemove the elements of one array from another and return them as a set (an array with\ndistinct values).\n\n*Example* Check which pieces of equipment Iron Man has, excluding a fixed list.\n\n r.table('marvel').get('IronMan')['equipment'].set_difference(['newBoots', 'arc_reactor']).run(conn)\n"),
(rethinkdb.ast.RqlQuery.set_insert, b"array.set_insert(value) -> array\n\nAdd a value to an array and return it as a set (an array with distinct values).\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots.\n\n r.table('marvel').get('IronMan')['equipment'].set_insert('newBoots').run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.set_intersection, b"array.set_intersection(array) -> array\n\nIntersect two arrays returning values that occur in both of them as a set (an array with\ndistinct values).\n\n*Example* Check which pieces of equipment Iron Man has from a fixed list.\n\n r.table('marvel').get('IronMan')['equipment'].set_intersection(['newBoots', 'arc_reactor']).run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.set_union, b"array.set_union(array) -> array\n\nAdd a several values to an array and return it as a set (an array with distinct values).\n\n*Example* Retrieve Iron Man's equipment list with the addition of some new boots and an arc reactor.\n\n r.table('marvel').get('IronMan')['equipment'].set_union(['newBoots', 'arc_reactor']).run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.splice_at, b'array.splice_at(index, array) -> array\n\nInsert several values in to an array at a given index. Returns the modified array.\n\n*Example* Hulk and Thor decide to join the avengers.\n\n r.expr(["Iron Man", "Spider-Man"]).splice_at(1, ["Hulk", "Thor"]).run(conn)\n'),
(rethinkdb.ast.RqlQuery.values, b'singleSelection.values() -> array\nobject.values() -> array\n\nReturn an array containing all of an object\'s values. `values()` guarantees the values will come out in the same order as [keys](http://rethinkdb.com/api/python/keys).\n\n*Example* Get all of the values from a table row.\n\n # row: { "id": 1, "mail": "fred@example.com", "name": "fred" }\n \n r.table(\'users\').get(1).values().run(conn)\n \n > [ 1, "fred@example.com", "fred" ]\n'),
(rethinkdb.ast.RqlQuery.without, b"sequence.without([selector1, selector2...]) -> stream\narray.without([selector1, selector2...]) -> array\nsingleSelection.without([selector1, selector2...]) -> object\nobject.without([selector1, selector2...]) -> object\n\nThe opposite of pluck; takes an object or a sequence of objects, and returns them with\nthe specified paths removed.\n\n*Example* Since we don't need it for this computation we'll save bandwidth and leave\nout the list of IronMan's romantic conquests.\n\n r.table('marvel').get('IronMan').without('personalVictoriesList').run(conn)\n\n*Example* Without their prized weapons, our enemies will quickly be vanquished.\n\n r.table('enemies').without('weapons').run(conn)\n\n*Example* Nested objects can be used to remove the damage subfield from the weapons and abilities fields.\n\n r.table('marvel').without({'weapons' : {'damage' : True}, 'abilities' : {'damage' : True}}).run(conn)\n\n*Example* The nested syntax can quickly become overly verbose so there's a shorthand for it.\n\n r.table('marvel').without({'weapons' : 'damage', 'abilities' : 'damage'}).run(conn)\n\n"),
(rethinkdb.circle, b"r.circle([longitude, latitude], radius[, num_vertices=32, geo_system='WGS84', unit='m', fill=True]) -> geometry\nr.circle(point, radius[, {num_vertices=32, geo_system='WGS84', unit='m', fill=True]) -> geometry\n\nConstruct a circular line or polygon. A circle in RethinkDB is a polygon or line *approximating* a circle of a given radius around a given center, consisting of a specified number of vertices (default 32).\n\nThe center may be specified either by two floating point numbers, the latitude (−90 to 90) and longitude (−180 to 180) of the point on a perfect sphere (see [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system), or by a point object. The radius is a floating point number whose units are meters by default, although that may be changed with the `unit` argument.\n\nOptional arguments available with `circle` are:\n\n* `num_vertices`: the number of vertices in the polygon or line. Defaults to 32.\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n* `unit`: Unit for the radius distance. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n* `fill`: if `True` (the default) the circle is filled, creating a polygon; if `False` the circle is unfilled (creating a line).\n\n*Example* Define a circle.\n\n r.table('geo').insert({\n 'id': 300,\n 'name': 'Hayes Valley',\n 'neighborhood': r.circle([-122.423246, 37.779388], 1000)\n }).run(conn)\n"),
(rethinkdb.ast.RqlQuery.distance, b"geometry.distance(geometry[, geo_system='WGS84', unit='m']) -> number\nr.distance(geometry, geometry[, geo_system='WGS84', unit='m']) -> number\n\nCompute the distance between a point and another geometry object. At least one of the geometry objects specified must be a point.\n\nOptional arguments available with `distance` are:\n\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n* `unit`: Unit to return the distance in. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n\nIf one of the objects is a polygon or a line, the point will be projected onto the line or polygon assuming a perfect sphere model before the distance is computed (using the model specified with `geo_system`). As a consequence, if the polygon or line is extremely large compared to Earth's radius and the distance is being computed with the default WGS84 model, the results of `distance` should be considered approximate due to the deviation between the ellipsoid and spherical models.\n\n*Example* Compute the distance between two points on the Earth in kilometers.\n\n > point1 = r.point(-122.423246, 37.779388)\n > point2 = r.point(-117.220406, 32.719464)\n > r.distance(point1, point2, unit='km').run(conn)\n \n 734.1252496021841\n"),
(rethinkdb.ast.RqlQuery.fill, b"line.fill() -> polygon\n\nConvert a Line object into a Polygon object. If the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\nIf the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them. You cannot directly construct a polygon with holes in it using `polygon`, but you can use [polygon_sub](http://rethinkdb.com/api/python/polygon_sub) to use a second polygon within the interior of the first to define a hole.\n\n*Example* Create a line object and then convert it to a polygon.\n\n r.table('geo').insert({\n 'id': 201,\n 'rectangle': r.line(\n [-122.423246, 37.779388],\n [-122.423246, 37.329898],\n [-121.886420, 37.329898],\n [-121.886420, 37.779388]\n )\n }).run(conn)\n \n r.table('geo').get(201).update({\n 'rectangle': r.row['rectangle'].fill()\n }, non_atomic=True).run(conn)\n"),
(rethinkdb.geojson, b"r.geojson(geojson) -> geometry\n\nConvert a GeoJSON object to a ReQL geometry object.\n\nRethinkDB only allows conversion of GeoJSON objects which have ReQL equivalents: Point, LineString, and Polygon. MultiPoint, MultiLineString, and MultiPolygon are not supported. (You could, however, store multiple points, lines and polygons in an array and use a geospatial multi index with them.)\n\nOnly longitude/latitude coordinates are supported. GeoJSON objects that use Cartesian coordinates, specify an altitude, or specify their own coordinate reference system will be rejected.\n\n*Example* Convert a GeoJSON object to a ReQL geometry object.\n\n geo_json = {\n 'type': 'Point',\n 'coordinates': [ -122.423246, 37.779388 ]\n }\n r.table('geo').insert({\n 'id': 'sfo',\n 'name': 'San Francisco',\n 'location': r.geojson(geo_json)\n }).run(conn)\n"),
(rethinkdb.ast.Table.get_intersecting, b"table.get_intersecting(geometry, index='indexname') -> selection<stream>\n\nGet all documents where the given geometry object intersects the geometry object of the requested geospatial index.\n\nThe `index` argument is mandatory. This command returns the same results as `table.filter(r.row('index').intersects(geometry))`. The total number of results is limited to the array size limit which defaults to 100,000, but can be changed with the `array_limit` option to [run](http://rethinkdb.com/api/python/run).\n\n*Example* Which of the locations in a list of parks intersect `circle1`?\n\n circle1 = r.circle([-117.220406, 32.719464], 10, unit='mi')\n r.table('parks').get_intersecting(circle1, index='area').run(conn)\n"),
(rethinkdb.ast.Table.get_nearest, b"table.get_nearest(point, index='indexname'[, max_results=100, max_dist=100000, unit='m', geo_system='WGS84']) -> array\n\nGet all documents where the specified geospatial index is within a certain distance of the specified point (default 100 kilometers).\n\nThe `index` argument is mandatory. Optional arguments are:\n\n* `max_results`: the maximum number of results to return (default 100).\n* `unit`: Unit for the distance. Possible values are `m` (meter, the default), `km` (kilometer), `mi` (international mile), `nm` (nautical mile), `ft` (international foot).\n* `max_dist`: the maximum distance from an object to the specified point (default 100 km).\n* `geo_system`: the reference ellipsoid to use for geographic coordinates. Possible values are `WGS84` (the default), a common standard for Earth's geometry, or `unit_sphere`, a perfect sphere of 1 meter radius.\n\nThe return value will be an array of two-item objects with the keys `dist` and `doc`, set to the distance between the specified point and the document (in the units specified with `unit`, defaulting to meters) and the document itself, respectively.\n\n*Example* Return a list of enemy hideouts within 5000 meters of the secret base.\n\n secret_base = r.point(-122.422876, 37.777128)\n r.table('hideouts').get_nearest(secret_base, index='location',\n max_dist=5000).run(conn)\n"),
(rethinkdb.ast.RqlQuery.includes, b"sequence.includes(geometry) -> sequence\ngeometry.includes(geometry) -> bool\n\nTests whether a geometry object is completely contained within another. When applied to a sequence of geometry objects, `includes` acts as a [filter](http://rethinkdb.com/api/python/filter), returning a sequence of objects from the sequence that include the argument.\n\n*Example* Is `point2` included within a 2000-meter circle around `point1`?\n\n > point1 = r.point(-117.220406, 32.719464)\n > point2 = r.point(-117.206201, 32.725186)\n > r.circle(point1, 2000).includes(point2).run(conn)\n \n True\n\n*Example* Which of the locations in a list of parks include `circle1`?\n\n circle1 = r.circle([-117.220406, 32.719464], 10, unit='mi')\n r.table('parks')['area'].includes(circle1).run(conn)\n"),
(rethinkdb.ast.RqlQuery.intersects, b"sequence.intersects(geometry) -> sequence\ngeometry.intersects(geometry) -> bool\nr.intersects(sequence, geometry) -> sequence\nr.intersects(geometry, geometry) -> bool\n\nTests whether two geometry objects intersect with one another. When applied to a sequence of geometry objects, `intersects` acts as a [filter](http://rethinkdb.com/api/python/filter), returning a sequence of objects from the sequence that intersect with the argument.\n\n*Example* Is `point2` within a 2000-meter circle around `point1`?\n\n > point1 = r.point(-117.220406, 32.719464)\n > point2 = r.point(-117.206201, 32.725186)\n > r.circle(point1, 2000).intersects(point2).run(conn)\n \n True\n\n*Example* Which of the locations in a list of parks intersect `circle1`?\n\n circle1 = r.circle([-117.220406, 32.719464], 10, unit='mi')\n r.table('parks')('area').intersects(circle1).run(conn)\n"),
(rethinkdb.line, b"r.line([lon1, lat1], [lon2, lat2], ...) -> line\nr.line(point1, point2, ...) -> line\n\nConstruct a geometry object of type Line. The line can be specified in one of two ways:\n\n* Two or more two-item arrays, specifying latitude and longitude numbers of the line's vertices;\n* Two or more [Point](http://rethinkdb.com/api/python/point) objects specifying the line's vertices.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\n*Example* Define a line.\n\n r.table('geo').insert({\n 'id': 101,\n 'route': r.line([-122.423246, 37.779388], [-121.886420, 37.329898])\n }).run(conn)\n\n*Example* Define a line using an array of points.\n\nYou can use the [args](http://rethinkdb.com/api/python/args) command to pass an array of Point objects (or latitude-longitude pairs) to `line`.\n\n var route = [\n [-122.423246, 37.779388],\n [-121.886420, 37.329898]\n ]\n r.table('geo').insert({\n 'id': 102,\n 'route': r.line(r.args(route))\n }).run(conn)\n"),
(rethinkdb.point, b"r.point(longitude, latitude) -> point\n\nConstruct a geometry object of type Point. The point is specified by two floating point numbers, the longitude (−180 to 180) and latitude (−90 to 90) of the point on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\n*Example* Define a point.\n\n r.table('geo').insert({\n 'id': 1,\n 'name': 'San Francisco',\n 'location': r.point(-122.423246, 37.779388)\n }).run(conn)\n"),
(rethinkdb.polygon, b"r.polygon([lon1, lat1], [lon2, lat2], [lon3, lat3], ...) -> polygon\nr.polygon(point1, point2, point3, ...) -> polygon\n\nConstruct a geometry object of type Polygon. The Polygon can be specified in one of two ways:\n\n* Three or more two-item arrays, specifying latitude and longitude numbers of the polygon's vertices;\n* Three or more [Point](http://rethinkdb.com/api/python/point) objects specifying the polygon's vertices.\n\nLongitude (−180 to 180) and latitude (−90 to 90) of vertices are plotted on a perfect sphere. See [Geospatial support](http://rethinkdb.com/docs/geo-support/) for more information on ReQL's coordinate system.\n\nIf the last point does not specify the same coordinates as the first point, `polygon` will close the polygon by connecting them. You cannot directly construct a polygon with holes in it using `polygon`, but you can use [polygon_sub](http://rethinkdb.com/api/python/polygon_sub) to use a second polygon within the interior of the first to define a hole.\n\n*Example* Define a polygon.\n\n r.table('geo').insert({\n 'id': 101,\n 'rectangle': r.polygon(\n [-122.423246, 37.779388],\n [-122.423246, 37.329898],\n [-121.886420, 37.329898],\n [-121.886420, 37.779388]\n )\n }).run(conn)\n\n*Example* Define a polygon using an array of vertices.\n\nYou can use the [args](http://rethinkdb.com/api/python/args) command to pass an array of Point objects (or latitude-longitude pairs) to `polygon`.\n\n vertices = [\n [-122.423246, 37.779388],\n [-122.423246, 37.329898],\n [-121.886420, 37.329898],\n [-121.886420, 37.779388]\n ]\n r.table('geo').insert({\n 'id': 102,\n 'rectangle': r.polygon(r.args(vertices))\n }).run(conn)\n"),
(rethinkdb.ast.RqlQuery.polygon_sub, b'polygon1.polygon_sub(polygon2) -> polygon\n\nUse `polygon2` to "punch out" a hole in `polygon1`. `polygon2` must be completely contained within `polygon1` and must have no holes itself (it must not be the output of `polygon_sub` itself).\n\n*Example* Define a polygon with a hole punched in it.\n\n outer_polygon = r.polygon(\n [-122.4, 37.7],\n [-122.4, 37.3],\n [-121.8, 37.3],\n [-121.8, 37.7]\n )\n inner_polygon = r.polygon(\n [-122.3, 37.4],\n [-122.3, 37.6],\n [-122.0, 37.6],\n [-122.0, 37.4]\n )\n outer_polygon.polygon_sub(inner_polygon).run(conn)\n'),
(rethinkdb.ast.RqlQuery.to_geojson, b"geometry.to_geojson() -> object\n\nConvert a ReQL geometry object to a GeoJSON object.\n\n*Example* Convert a ReQL geometry object to a GeoJSON object.\n\n > r.table('geo').get('sfo')['location'].to_geojson().run(conn)\n \n {\n 'type': 'Point',\n 'coordinates': [ -122.423246, 37.779388 ]\n }\n"),
(rethinkdb.ast.RqlQuery.eq_join, b'sequence.eq_join(left_field, right_table[, index=\'id\']) -> sequence\nsequence.eq_join(predicate_function, right_table[, index=\'id\']) -> sequence\n\nJoin tables using a field or function on the left-hand sequence matching primary keys or secondary indexes on the right-hand table. `eq_join` is more efficient than other ReQL join types, and operates much faster. Documents in the result set consist of pairs of left-hand and right-hand documents, matched when the field on the left-hand side exists and is non-null and an entry with that field\'s value exists in the specified index on the right-hand side.\n\nThe result set of `eq_join` is a stream or array of objects. Each object in the returned set will be an object of the form `{ left: <left-document>, right: <right-document> }`, where the values of `left` and `right` will be the joined documents. Use the <code><a href="/api/python/zip/">zip</a></code> command to merge the `left` and `right` fields together.\n\n**Example:** Match players with the games they\'ve played against one another.\n\nThe players table contains these documents:\n\n [\n { \'id\': 1, \'player\': \'George\', \'gameId\': 1 },\n { \'id\': 2, \'player\': \'Agatha\', \'gameId\': 3 },\n { \'id\': 3, \'player\': \'Fred\', \'gameId\': 2 },\n { \'id\': 4, \'player\': \'Marie\', \'gameId\': 2 },\n { \'id\': 5, \'player\': \'Earnest\', \'gameId\': 1 },\n { \'id\': 6, \'player\': \'Beth\', \'gameId\': 3 }\n ]\n\nThe games table contains these documents:\n\n [\n { \'id\': 1, \'field\': \'Little Delving\' },\n { \'id\': 2, \'field\': \'Rushock Bog\' },\n { \'id\': 3, \'field\': \'Bucklebury\' }\n ]\n\nJoin these tables using `game_id` on the player table and `id` on the games table:\n\n r.table(\'players\').eq_join(\'game_id\', r.table(\'games\')).run(conn)\n\nThis will return a result set such as the following:\n\n [\n {\n "left" : { "gameId" : 3, "id" : 2, "player" : "Agatha" },\n "right" : { "id" : 3, "field" : 
"Bucklebury" }\n },\n {\n "left" : { "gameId" : 2, "id" : 3, "player" : "Fred" },\n "right" : { "id" : 2, "field" : "Rushock Bog" }\n },\n ...\n ]\n\nWhat you likely want is the result of using `zip` with that. For clarity, we\'ll use `without` to drop the `id` field from the games table (it conflicts with the `id` field for the players and it\'s redundant anyway), and we\'ll order it by the games.\n\n r.table(\'players\').eq_join(\'game_id\', r.table(\'games\')).without({\'right\': "id"}).zip().order_by(\'game_id\').run(conn)\n \n [\n { "field": "Little Delving", "gameId": 1, "id": 5, "player": "Earnest" },\n { "field": "Little Delving", "gameId": 1, "id": 1, "player": "George" },\n { "field": "Rushock Bog", "gameId": 2, "id": 3, "player": "Fred" },\n { "field": "Rushock Bog", "gameId": 2, "id": 4, "player": "Marie" },\n { "field": "Bucklebury", "gameId": 3, "id": 6, "player": "Beth" },\n { "field": "Bucklebury", "gameId": 3, "id": 2, "player": "Agatha" }\n ]\n\nFor more information, see [Table joins in RethinkDB](http://rethinkdb.com/docs/table-joins/).\n\n**Example:** Use a secondary index on the right table rather than the primary key. If players have a secondary index on their cities, we can get a list of arenas with players in the same area.\n\n r.table(\'arenas\').eq_join(\'city_id\', r.table(\'arenas\'), index=\'city_id\').run(conn)\n\n**Example:** Use a nested key as the join field. 
Suppose the documents in the players table were structured like this:\n\n { \'id\': 1, \'player\': \'George\', \'game\': {\'id\': 1} },\n { \'id\': 2, \'player\': \'Agatha\', \'game\': {\'id\': 3} },\n ...\n\nSimply specify the field using the `row` command instead of a string.\n\n r.table(\'players\').eq_join(r.row[\'game\'][\'id\'], r.table(\'games\')).without({\'right\': \'id\'}).zip().run(conn)\n \n [\n { "field": "Little Delving", "game": { "id": 1 }, "id": 5, "player": "Earnest" },\n { "field": "Little Delving", "game": { "id": 1 }, "id": 1, "player": "George" },\n ...\n ]\n\n**Example:** Use a function instead of a field to join on a more complicated expression. Suppose the players have lists of favorite games ranked in order in a field such as `"favorites": [3, 2, 1]`. Get a list of players and their top favorite:\n\n r.table(\'players3\').eq_join(\n lambda player: player[\'favorites\'].nth(0),\n r.table(\'games\')\n ).without([{\'left\': [\'favorites\', \'game_id\', \'id\']}, {\'right\': \'id\'}]).zip()\n\nResult:\n\n [\n \t{ "field": "Rushock Bog", "name": "Fred" },\n \t{ "field": "Little Delving", "name": "George" },\n \t...\n ]\n'),
(rethinkdb.ast.RqlQuery.inner_join, b"sequence.inner_join(other_sequence, predicate_function) -> stream\narray.inner_join(other_sequence, predicate_function) -> array\n\nReturns an inner join of two sequences. The returned sequence represents an intersection of the left-hand sequence and the right-hand sequence: each row of the left-hand sequence will be compared with each row of the right-hand sequence to find all pairs of rows which satisfy the predicate. Each matched pair of rows of both sequences are combined into a result row. In most cases, you will want to follow the join with [zip](http://rethinkdb.com/api/python/zip) to combine the left and right results.\n\n*Example* Return a list of all matchups between Marvel and DC heroes in which the DC hero could beat the Marvel hero in a fight.\n\n r.table('marvel').inner_join(r.table('dc'),\n lambda marvel_row, dc_row: marvel_row['strength'] < dc_row['strength']\n ).zip().run(conn)\n\n(Compare this to an [outer_join](http://rethinkdb.com/api/python/outer_join) with the same inputs and predicate, which would return a list of *all* Marvel heroes along with any DC heroes with a higher strength.)"),
(rethinkdb.ast.RqlQuery.outer_join, b"sequence.outer_join(other_sequence, predicate_function) -> stream\narray.outer_join(other_sequence, predicate_function) -> array\n\nReturns a left outer join of two sequences. The returned sequence represents a union of the left-hand sequence and the right-hand sequence: all documents in the left-hand sequence will be returned, each matched with a document in the right-hand sequence if one satisfies the predicate condition. In most cases, you will want to follow the join with [zip](http://rethinkdb.com/api/python/zip) to combine the left and right results.\n\n*Example* Return a list of all Marvel heroes, paired with any DC heroes who could beat them in a fight.\n\n r.table('marvel').outer_join(r.table('dc'),\n lambda marvel_row, dc_row: marvel_row['strength'] < dc_row['strength']\n ).zip().run(conn)\n\n(Compare this to an [inner_join](http://rethinkdb.com/api/python/inner_join) with the same inputs and predicate, which would return a list only of the matchups in which the DC hero has the higher strength.)\n"),
(rethinkdb.ast.RqlQuery.zip, b"stream.zip() -> stream\narray.zip() -> array\n\nUsed to 'zip' up the result of a join by merging the 'right' fields into 'left' fields of each member of the sequence.\n\n*Example* 'zips up' the sequence by merging the left and right fields produced by a join.\n\n r.table('marvel').eq_join('main_dc_collaborator', r.table('dc')).zip().run(conn)\n"),
(rethinkdb.db_create, b'r.db_create(db_name) -> object\n\nCreate a database. A RethinkDB database is a collection of tables, similar to\nrelational databases.\n\nIf successful, the command returns an object with two fields:\n\n* `dbs_created`: always `1`.\n* `config_changes`: a list containing one object with two fields, `old_val` and `new_val`:\n * `old_val`: always `None`.\n * `new_val`: the database\'s new [config](http://rethinkdb.com/api/python/config) value.\n\nIf a database with the same name already exists, the command throws `ReqlRuntimeError`.\n\nNote: Only alphanumeric characters and underscores are valid for the database name.\n\n*Example* Create a database named \'superheroes\'.\n\n r.db_create(\'superheroes\').run(conn)\n \n {\n "config_changes": [\n {\n "new_val": {\n "id": "e4689cfc-e903-4532-a0e6-2d6797a43f07",\n "name": "superheroes"\n },\n "old_val": None\n }\n ],\n "dbs_created": 1\n }\n\n'),
(rethinkdb.db_drop, b'r.db_drop(db_name) -> object\n\nDrop a database. The database, all its tables, and corresponding data will be deleted.\n\nIf successful, the command returns an object with two fields:\n\n* `dbs_dropped`: always `1`.\n* `tables_dropped`: the number of tables in the dropped database.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: the database\'s original [config](http://rethinkdb.com/api/python/config) value.\n * `new_val`: always `None`.\n\nIf the given database does not exist, the command throws `ReqlRuntimeError`.\n\n*Example* Drop a database named \'superheroes\'.\n\n r.db_drop(\'superheroes\').run(conn)\n \n {\n "config_changes": [\n {\n "old_val": {\n "id": "e4689cfc-e903-4532-a0e6-2d6797a43f07",\n "name": "superheroes"\n },\n "new_val": None\n }\n ],\n "tables_dropped": 3,\n "dbs_dropped": 1\n }\n\n'),
(rethinkdb.db_list, b'r.db_list() -> array\n\nList all database names in the system. The result is a list of strings.\n\n*Example* List all databases.\n\n r.db_list().run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.changes, b'stream.changes([options]) -> stream\nsingleSelection.changes([options]) -> stream\n\nReturn a changefeed, an infinite stream of objects representing changes to a query. A changefeed may return changes to a table or an individual document (a "point" changefeed), and document transformation commands such as `filter` or `map` may be used before the `changes` command to affect the output.\n\nThere are four optional arguments to `changes`.\n\n* `squash`: Controls how change notifications are batched. Acceptable values are `True`, `False` and a numeric value:\n * `True`: When multiple changes to the same document occur before a batch of notifications is sent, the changes are "squashed" into one change. The client receives a notification that will bring it fully up to date with the server.\n * `False`: All changes will be sent to the client verbatim. This is the default.\n * `n`: A numeric value (floating point). Similar to `True`, but the server will wait `n` seconds to respond in order to squash as many changes together as possible, reducing network traffic. The first batch will always be returned immediately.\n* `changefeed_queue_size`: the number of changes the server will buffer between client reads before it starts dropping changes and generates an error (default: 100,000).\n* `include_initial`: if `True`, the changefeed stream will begin with the current contents of the table or selection being monitored. These initial results will have `new_val` fields, but no `old_val` fields. The initial results may be intermixed with actual changes, as long as an initial result for the changed document has already been given.\n* `include_states`: if `True`, the changefeed stream will include special status documents consisting of the field `state` and a string indicating a change in the feed\'s state. These documents can occur at any point in the feed between the notification documents described below. 
If `include_states` is `False` (the default), the status documents will not be sent.\n\nThere are currently two states:\n\n* `{"state": "initializing"}` indicates the following documents represent initial values on the feed rather than changes. This will be the first document of a feed that returns initial values.\n* `{"state": "ready"}` indicates the following documents represent changes. This will be the first document of a feed that does *not* return initial values; otherwise, it will indicate the initial values have all been sent.\n\nIf the table becomes unavailable, the changefeed will be disconnected, and a runtime exception will be thrown by the driver.\n\nChangefeed notifications take the form of a two-field object:\n\n {\n "old_val": <document before change>,\n "new_val": <document after change>\n }\n\nWhen a document is deleted, `new_val` will be `None`; when a document is inserted, `old_val` will be `None`.\n\nThe server will buffer up to 100,000 elements. If the buffer limit is hit, early changes will be discarded, and the client will receive an object of the form `{"error": "Changefeed cache over array size limit, skipped X elements."}` where `X` is the number of elements skipped.\n\nCommands that operate on streams (such as [filter](http://rethinkdb.com/api/python/filter/) or [map](http://rethinkdb.com/api/python/map/)) can usually be chained after `changes`. 
However, since the stream produced by `changes` has no ending, commands that need to consume the entire stream before returning (such as [reduce](http://rethinkdb.com/api/python/reduce/) or [count](http://rethinkdb.com/api/python/count/)) cannot.\n\n*Example* Subscribe to the changes on a table.\n\nStart monitoring the changefeed in one client:\n\n for change in r.table(\'games\').changes().run(conn):\n print change\n\nAs these queries are performed in a second client, the first client would receive and print the following objects:\n\n > r.table(\'games\').insert({\'id\': 1}).run(conn)\n {\'old_val\': None, \'new_val\': {\'id\': 1}}\n \n > r.table(\'games\').get(1).update({\'player1\': \'Bob\'}).run(conn)\n {\'old_val\': {\'id\': 1}, \'new_val\': {\'id\': 1, \'player1\': \'Bob\'}}\n \n > r.table(\'games\').get(1).replace({\'id\': 1, \'player1\': \'Bob\', \'player2\': \'Alice\'}).run(conn)\n {\'old_val\': {\'id\': 1, \'player1\': \'Bob\'},\n \'new_val\': {\'id\': 1, \'player1\': \'Bob\', \'player2\': \'Alice\'}}\n \n > r.table(\'games\').get(1).delete().run(conn)\n {\'old_val\': {\'id\': 1, \'player1\': \'Bob\', \'player2\': \'Alice\'}, \'new_val\': None}\n \n > r.table_drop(\'games\').run(conn)\n ReqlRuntimeError: Changefeed aborted (table unavailable)\n\n*Example* Return all the changes that increase a player\'s score.\n\n r.table(\'test\').changes().filter(\n r.row[\'new_val\'][\'score\'] > r.row[\'old_val\'][\'score\']\n ).run(conn)\n\n*Example* Return all the changes to a specific player\'s score that increase it past 10.\n\n r.table(\'test\').get(1).filter(r.row[\'score\'].gt(10)).changes().run(conn)\n\n*Example* Return all the inserts on a table.\n\n r.table(\'test\').changes().filter(r.row[\'old_val\'].eq(None)).run(conn)\n\n*Example* Return all the changes to game 1, with state notifications and initial values.\n\n r.table(\'games\').get(1).changes(include_initial=True, include_states=True).run(conn)\n \n # result returned on changefeed\n {"state": 
"initializing"}\n {"new_val": {"id": 1, "score": 12, "arena": "Hobbiton Field"}}\n {"state": "ready"}\n {\n \t"old_val": {"id": 1, "score": 12, "arena": "Hobbiton Field"},\n \t"new_val": {"id": 1, "score": 14, "arena": "Hobbiton Field"}\n }\n {\n \t"old_val": {"id": 1, "score": 14, "arena": "Hobbiton Field"},\n \t"new_val": {"id": 1, "score": 17, "arena": "Hobbiton Field", "winner": "Frodo"}\n }\n\n*Example* Return all the changes to the top 10 games. This assumes the presence of a `score` secondary index on the `games` table.\n\n r.table(\'games\').order_by(index=r.desc(\'score\')).limit(10).run(conn)\n'),
(rethinkdb.ast.Table.index_create, b'table.index_create(index_name[, index_function][, multi=False, geo=False]) -> object\n\nCreate a new secondary index on a table. Secondary indexes improve the speed of many read queries at the slight cost of increased storage space and decreased write performance. For more information about secondary indexes, read the article "[Using secondary indexes in RethinkDB](http://rethinkdb.com/docs/secondary-indexes/)."\n\nRethinkDB supports different types of secondary indexes:\n\n- *Simple indexes* based on the value of a single field.\n- *Compound indexes* based on multiple fields.\n- *Multi indexes* based on arrays of values.\n- *Geospatial indexes* based on indexes of geometry objects, created when the `geo` optional argument is true.\n- Indexes based on *arbitrary expressions*.\n\nThe `index_function` can be an anonymous function or a binary representation obtained from the `function` field of [index_status](http://rethinkdb.com/api/python/index_status).\n\nIf successful, `create_index` will return an object of the form `{"created": 1}`. If an index by that name already exists on the table, a `ReqlRuntimeError` will be thrown.\n\n*Example* Create a simple index based on the field `post_id`.\n\n r.table(\'comments\').index_create(\'post_id\').run(conn)\n*Example* Create a simple index based on the nested field `author > name`.\n\n r.table(\'comments\').index_create(\'author_name\', r.row["author"]["name"]).run(conn)\n\n*Example* Create a geospatial index based on the field `location`.\n\n r.table(\'places\').index_create(\'location\', geo=True).run(conn)\n\nA geospatial index field should contain only geometry objects. 
It will work with geometry ReQL terms ([get_intersecting](http://rethinkdb.com/api/python/get_intersecting/) and [get_nearest](http://rethinkdb.com/api/python/get_nearest/)) as well as index-specific terms ([index_status](http://rethinkdb.com/api/python/index_status), [index_wait](http://rethinkdb.com/api/python/index_wait), [index_drop](http://rethinkdb.com/api/python/index_drop) and [index_list](http://rethinkdb.com/api/python/index_list)). Using terms that rely on non-geometric ordering such as [get_all](http://rethinkdb.com/api/python/get_all/), [order_by](http://rethinkdb.com/api/python/order_by/) and [between](http://rethinkdb.com/api/python/between/) will result in an error.\n\n*Example* Create a compound index based on the fields `post_id` and `date`.\n\n r.table(\'comments\').index_create(\'post_and_date\', [r.row["post_id"], r.row["date"]]).run(conn)\n\n*Example* Create a multi index based on the field `authors`.\n\n r.table(\'posts\').index_create(\'authors\', multi=True).run(conn)\n\n*Example* Create a geospatial multi index based on the field `towers`.\n\n r.table(\'networks\').index_create(\'towers\', geo=True, multi=True).run(conn)\n\n*Example* Create an index based on an arbitrary expression.\n\n r.table(\'posts\').index_create(\'authors\', lambda doc:\n r.branch(\n doc.has_fields("updated_at"),\n doc["updated_at"],\n doc["created_at"]\n )\n ).run(conn)\n\n*Example* Create a new secondary index based on an existing one.\n\n index = r.table(\'posts\').index_status(\'authors\').nth(0)[\'function\'].run(conn)\n r.table(\'new_posts\').index_create(\'authors\', index).run(conn)\n\n*Example* Rebuild an outdated secondary index on a table.\n\n old_index = r.table(\'posts\').index_status(\'old_index\').nth(0)[\'function\'].run(conn)\n r.table(\'posts\').index_create(\'new_index\', old_index).run(conn)\n r.table(\'posts\').index_wait(\'new_index\').run(conn)\n r.table(\'posts\').index_rename(\'new_index\', \'old_index\', overwrite=True).run(conn)\n'),
(rethinkdb.ast.Table.index_drop, b"table.index_drop(index_name) -> object\n\nDelete a previously created secondary index of this table.\n\n*Example* Drop a secondary index named 'code_name'.\n\n r.table('dc').index_drop('code_name').run(conn)\n\n"),
(rethinkdb.ast.Table.index_list, b"table.index_list() -> array\n\nList all the secondary indexes of this table.\n\n*Example* List the available secondary indexes for this table.\n\n r.table('marvel').index_list().run(conn)\n"),
(rethinkdb.ast.Table.index_rename, b"table.index_rename(old_index_name, new_index_name[, overwrite=False]) -> object\n\nRename an existing secondary index on a table. If the optional argument `overwrite` is specified as `True`, a previously existing index with the new name will be deleted and the index will be renamed. If `overwrite` is `False` (the default) an error will be raised if the new index name already exists.\n\nThe return value on success will be an object of the format `{'renamed': 1}`, or `{'renamed': 0}` if the old and new names are the same.\n\nAn error will be raised if the old index name does not exist, if the new index name is already in use and `overwrite` is `False`, or if either the old or new index name are the same as the primary key field name.\n\n*Example* Rename an index on the comments table.\n\n r.table('comments').index_rename('post_id', 'message_id').run(conn)\n"),
(rethinkdb.ast.Table.index_status, b'table.index_status([, index...]) -> array\n\nGet the status of the specified indexes on this table, or the status\nof all indexes on this table if no indexes are specified.\n\nThe result is an array where for each index, there will be an object like this one:\n\n {\n "index": <index_name>,\n "ready": True,\n "function": <binary>,\n "multi": <bool>,\n "outdated": <bool>\n }\n\nor this one:\n\n {\n "index": <index_name>,\n "ready": False,\n "progress": <float>,\n "function": <binary>,\n "multi": <bool>,\n "outdated": <bool>\n }\n\nThe `multi` field will be `true` or `false` depending on whether this index was created as a multi index (see [index_create](http://rethinkdb.com/api/python/index_create/) for details). The `outdated` field will be true if the index is outdated in the current version of RethinkDB and needs to be rebuilt. The `progress` field is a float between `0` and `1`, indicating how far along the server is in constructing indexes after the most recent change to the table that would affect them. (`0` indicates no such indexes have been constructed; `1` indicates all of them have.)\n\nThe `function` field is a binary object containing an opaque representation of the secondary index (including the `multi` argument if specified). It can be passed as the second argument to [index_create](http://rethinkdb.com/api/python/index_create/) to create a new index with the same function; see `index_create` for more information.\n\n*Example* Get the status of all the indexes on `test`:\n\n r.table(\'test\').index_status().run(conn)\n\n*Example* Get the status of the `timestamp` index:\n\n r.table(\'test\').index_status(\'timestamp\').run(conn)\n\n*Example* Save the binary representation of the index:\n\n func = r.table(\'test\').index_status(\'timestamp\').nth(0)[\'function\'].run(conn)\n'),
(rethinkdb.ast.Table.index_wait, b'table.index_wait([, index...]) -> array\n\nWait for the specified indexes on this table to be ready, or for all\nindexes on this table to be ready if no indexes are specified.\n\nThe result is an array containing one object for each table index:\n\n {\n "index": <index_name>,\n "ready": True,\n "function": <binary>,\n "multi": <bool>,\n "geo": <bool>,\n "outdated": <bool>\n }\n\nSee the [index_status](http://rethinkdb.com/api/python/index_status) documentation for a description of the field values.\n\n*Example* Wait for all indexes on the table `test` to be ready:\n\n r.table(\'test\').index_wait().run(conn)\n\n*Example* Wait for the index `timestamp` to be ready:\n\n r.table(\'test\').index_wait(\'timestamp\').run(conn)\n'),
(rethinkdb.ast.DB.table_create, b'db.table_create(table_name[, options]) -> object\nr.table_create(table_name[, options]) -> object\n\nCreate a table. A RethinkDB table is a collection of JSON documents.\n\nIf successful, the command returns an object with two fields:\n\n* `tables_created`: always `1`.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: always `None`.\n * `new_val`: the table\'s new [config](http://rethinkdb.com/api/python/config) value.\n\nIf a table with the same name already exists, the command throws `ReqlRuntimeError`.\n\nWhen creating a table you can specify the following options:\n\n* `primary_key`: the name of the primary key. The default primary key is `id`.\n* `durability`: if set to `soft`, writes will be acknowledged by the server immediately and flushed to disk in the background. The default is `hard`: acknowledgment of writes happens after data has been written to disk.\n* `shards`: the number of shards, an integer from 1-32. Defaults to `1`.\n* `replicas`: either an integer or a mapping object. Defaults to `1`.\n * If `replicas` is an integer, it specifies the number of replicas per shard. Specifying more replicas than there are servers will return an error.\n * If `replicas` is an object, it specifies key-value pairs of server tags and the number of replicas to assign to those servers: `{\'tag1\': 2, \'tag2\': 4, \'tag3\': 2, ...}`.\n* `primary_replica_tag`: the primary server specified by its server tag. Required if `replicas` is an object; the tag must be in the object. This must *not* be specified if `replicas` is an integer.\n\nThe data type](http://rethinkdb.com/docs/data-types/) of a primary key is usually a string (like a UUID) or a number, but it can also be a time, binary object, boolean or an array. Data types can be mixed in the primary key field, but all values must be unique. 
Using an array as a primary key creates a compound index; read the documentation on [compound secondary indexes for more information, as it applies to primary keys as well. Primary keys cannot be objects.\n\n*Example* Create a table named \'dc_universe\' with the default settings.\n\n r.db(\'heroes\').table_create(\'dc_universe\').run(conn)\n \n {\n "config_changes": [\n {\n "new_val": {\n "db": "test",\n "durability": "hard",\n "id": "20ea60d4-3b76-4817-8828-98a236df0297",\n "name": "dc_universe",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "rethinkdb_srv1",\n "replicas": [\n "rethinkdb_srv1",\n "rethinkdb_srv2"\n ]\n }\n ],\n "write_acks": "majority"\n },\n "old_val": None\n }\n ],\n "tables_created": 1\n }\n\n*Example* Create a table named \'dc_universe\' using the field \'name\' as primary key.\n\n r.db(\'test\').table_create(\'dc_universe\', primary_key=\'name\').run(conn)\n\n*Example* Create a table set up for two shards and three replicas per shard. This requires three available servers.\n\n r.db(\'test\').table_create(\'dc_universe\', shards=2, replicas=3).run(conn)\n\nRead [Sharding and replication](http://rethinkdb.com/docs/sharding-and-replication/) for a complete discussion of the subject, including advanced topics.\n'),
(rethinkdb.ast.DB.table_drop, b'db.table_drop(table_name) -> object\n\nDrop a table. The table and all its data will be deleted.\n\nIf successful, the command returns an object with two fields:\n\n* `tables_dropped`: always `1`.\n* `config_changes`: a list containing one two-field object, `old_val` and `new_val`:\n * `old_val`: the dropped table\'s [config](http://rethinkdb.com/api/python/config) value.\n * `new_val`: always `None`.\n\nIf the given table does not exist in the database, the command throws `ReqlRuntimeError`.\n\n*Example* Drop a table named \'dc_universe\'.\n\n r.db(\'test\').table_drop(\'dc_universe\').run(conn)\n \n {\n "config_changes": [\n {\n "old_val": {\n "db": "test",\n "durability": "hard",\n "id": "20ea60d4-3b76-4817-8828-98a236df0297",\n "name": "dc_universe",\n "primary_key": "id",\n "shards": [\n {\n "primary_replica": "rethinkdb_srv1",\n "replicas": [\n "rethinkdb_srv1",\n "rethinkdb_srv2"\n ]\n }\n ],\n "write_acks": "majority"\n },\n "new_val": None\n }\n ],\n "tables_dropped": 1\n }\n'),
(rethinkdb.ast.DB.table_list, b"db.table_list() -> array\n\nList all table names in a database. The result is a list of strings.\n\n*Example* List all tables of the 'test' database.\n\n r.db('test').table_list().run(conn)\n \n"),
(rethinkdb.ast.RqlQuery.__add__, b'value + value -> value\ntime + number -> time\nvalue.add(value[, value, ...]) -> value\ntime.add(number[, number, ...]) -> time\n\nSum two or more numbers, or concatenate two or more strings or arrays. (Note that ReQL will not perform type coercion. You cannot, for example, `add` a string and a number together.) The `add` command can be called in either prefix or infix form; both forms are equivalent.\n\n*Example* It\'s as easy as 2 + 2 = 4.\n\n > (r.expr(2) + 2).run(conn)\n \n 4\n\n*Example* Concatenate strings.\n\n > (r.expr("foo") + "bar" + "baz").run(conn)\n \n "foobarbaz"\n\n*Example* Concatenate arrays.\n\n > (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n \n ["foo", "bar", "buzz"]\n\n*Example* Create a date one year from now.\n\n (r.now() + 365*24*60*60).run(conn)\n\n*Example* Use [args](http://rethinkdb.com/api/python/args) with `add` to sum multiple values.\n\n > vals = [10, 20, 30]\n > r.add(r.args(vals)).run(conn)\n \n 60\n\n*Example* Concatenate an array of strings with `args`.\n\n > vals = [\'foo\', \'bar\', \'buzz\']\n > r.add(r.args(vals)).run(conn)\n \n "foobarbuzz"\n'),
(rethinkdb.add, b'value + value -> value\ntime + number -> time\nvalue.add(value[, value, ...]) -> value\ntime.add(number[, number, ...]) -> time\n\nSum two or more numbers, or concatenate two or more strings or arrays. (Note that ReQL will not perform type coercion. You cannot, for example, `add` a string and a number together.) The `add` command can be called in either prefix or infix form; both forms are equivalent.\n\n*Example* It\'s as easy as 2 + 2 = 4.\n\n > (r.expr(2) + 2).run(conn)\n \n 4\n\n*Example* Concatenate strings.\n\n > (r.expr("foo") + "bar" + "baz").run(conn)\n \n "foobarbaz"\n\n*Example* Concatenate arrays.\n\n > (r.expr(["foo", "bar"]) + ["buzz"]).run(conn)\n \n ["foo", "bar", "buzz"]\n\n*Example* Create a date one year from now.\n\n (r.now() + 365*24*60*60).run(conn)\n\n*Example* Use [args](http://rethinkdb.com/api/python/args) with `add` to sum multiple values.\n\n > vals = [10, 20, 30]\n > r.add(r.args(vals)).run(conn)\n \n 60\n\n*Example* Concatenate an array of strings with `args`.\n\n > vals = [\'foo\', \'bar\', \'buzz\']\n > r.add(r.args(vals)).run(conn)\n \n "foobarbuzz"\n'),
(rethinkdb.ast.RqlQuery.__and__, b'bool & bool -> bool\nbool.and_([bool, bool, ...]) -> bool\nr.and_([bool, bool, ...]) -> bool\n\nCompute the logical "and" of one or more values. The `and_` command can be used as an infix operator after its first argument (`r.expr(True).and_(False)`) or given all of its arguments as parameters (`r.and_(True, False)`). The standard Python and operator, `&`, may also be used with ReQL.\n\nCalling `and_` with zero arguments will return `True`.\n\n*Example* Return whether both `a` and `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) & b).run(conn)\n \n False\n*Example* Return whether all of `x`, `y` and `z` evaluate to true.\n\n > x = True\n > y = True\n > z = True\n > r.and_(x, y, z).run(conn)\n \n True\n'),
(rethinkdb.and_, b'bool & bool -> bool\nbool.and_([bool, bool, ...]) -> bool\nr.and_([bool, bool, ...]) -> bool\n\nCompute the logical "and" of one or more values. The `and_` command can be used as an infix operator after its first argument (`r.expr(True).and_(False)`) or given all of its arguments as parameters (`r.and_(True, False)`). The standard Python and operator, `&`, may also be used with ReQL.\n\nCalling `and_` with zero arguments will return `True`.\n\n*Example* Return whether both `a` and `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) & b).run(conn)\n \n False\n*Example* Return whether all of `x`, `y` and `z` evaluate to true.\n\n > x = True\n > y = True\n > z = True\n > r.and_(x, y, z).run(conn)\n \n True\n'),
(rethinkdb.ast.RqlQuery.ceil, b"r.ceil(number) -> number\nnumber.ceil() -> number\n\nRounds the given value up, returning the smallest integer value greater than or equal to the given value (the value's ceiling).\n\n*Example* Return the ceiling of 12.345.\n\n > r.ceil(12.345).run(conn)\n \n 13.0\n\nThe `ceil` command can also be chained after an expression.\n\n*Example* Return the ceiling of -12.345.\n\n > r.expr(-12.345).ceil().run(conn)\n \n -12.0\n\n*Example* Return Iron Man's weight, rounded up with `ceil`.\n\n r.table('superheroes').get('ironman')['weight'].ceil().run(conn)\n"),
(rethinkdb.ast.RqlQuery.__div__, b"number / number -> number\nnumber.div(number[, number ...]) -> number\n\nDivide two numbers.\n\n*Example* It's as easy as 2 / 2 = 1.\n\n (r.expr(2) / 2).run(conn)\n"),
(rethinkdb.div, b"number / number -> number\nnumber.div(number[, number ...]) -> number\n\nDivide two numbers.\n\n*Example* It's as easy as 2 / 2 = 1.\n\n (r.expr(2) / 2).run(conn)\n"),
(rethinkdb.ast.RqlQuery.__eq__, b"value.eq(value[, value, ...]) -> bool\nvalue == value -> bool\n\nTest if two or more values are equal.\n\n*Example* See if a user's `role` field is set to `administrator`. \n\n r.table('users').get(1)['role'].eq('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] == 'administrator').run(conn)\n\n*Example* See if three variables contain equal values.\n\n r.eq(a, b, c).run(conn)\n"),
(rethinkdb.ast.RqlQuery.eq, b"value.eq(value[, value, ...]) -> bool\nvalue == value -> bool\n\nTest if two or more values are equal.\n\n*Example* See if a user's `role` field is set to `administrator`. \n\n r.table('users').get(1)['role'].eq('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] == 'administrator').run(conn)\n\n*Example* See if three variables contain equal values.\n\n r.eq(a, b, c).run(conn)\n"),
(rethinkdb.ast.RqlQuery.floor, b"r.floor(number) -> number\nnumber.floor() -> number\n\nRounds the given value down, returning the largest integer value less than or equal to the given value (the value's floor).\n\n*Example* Return the floor of 12.345.\n\n > r.floor(12.345).run(conn)\n \n 12.0\n\nThe `floor` command can also be chained after an expression.\n\n*Example* Return the floor of -12.345.\n\n > r.expr(-12.345).floor().run(conn)\n \n -13.0\n\n*Example* Return Iron Man's weight, rounded down with `floor`.\n\n r.table('superheroes').get('ironman')['weight'].floor().run(conn)\n"),
(rethinkdb.ast.RqlQuery.__ge__, b"value.ge(value[, value, ...]) -> bool\nvalue >= value -> bool\n\nCompare values, testing if the left-hand value is greater or equal to than the right-hand.\n\n*Example* Test if a player has scored 10 points or more.\n\n r.table('players').get(1)['score'].ge(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] >= 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest.\n\n a = 10\n b = 20\n c = 15\n r.ge(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.ge(a, b).and(r.ge(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.ge, b"value.ge(value[, value, ...]) -> bool\nvalue >= value -> bool\n\nCompare values, testing if the left-hand value is greater or equal to than the right-hand.\n\n*Example* Test if a player has scored 10 points or more.\n\n r.table('players').get(1)['score'].ge(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] >= 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest.\n\n a = 10\n b = 20\n c = 15\n r.ge(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.ge(a, b).and(r.ge(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.__gt__, b"value.gt(value[, value, ...]) -> bool\nvalue > value -> bool\n\nCompare values, testing if the left-hand value is greater than the right-hand.\n\n*Example* Test if a player has scored more than 10 points.\n\n r.table('players').get(1)['score'].gt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] > 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest, with no values being equal to one another.\n\n a = 10\n b = 20\n c = 15\n r.gt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.gt(a, b).and(r.gt(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.gt, b"value.gt(value[, value, ...]) -> bool\nvalue > value -> bool\n\nCompare values, testing if the left-hand value is greater than the right-hand.\n\n*Example* Test if a player has scored more than 10 points.\n\n r.table('players').get(1)['score'].gt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] > 10).run(conn)\n\n*Example* Test if variables are ordered from lowest to highest, with no values being equal to one another.\n\n a = 10\n b = 20\n c = 15\n r.gt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.gt(a, b).and(r.gt(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.__le__, b"value.le(value[, value, ...]) -> bool\nvalue <= value -> bool\n\nCompare values, testing if the left-hand value is less than or equal to the right-hand.\n\n*Example* Test if a player has scored 10 points or less.\n\n r.table('players').get(1)['score'].le(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] <= 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest.\n\n a = 20\n b = 10\n c = 15\n r.le(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.le(a, b).and(r.le(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.le, b"value.le(value[, value, ...]) -> bool\nvalue <= value -> bool\n\nCompare values, testing if the left-hand value is less than or equal to the right-hand.\n\n*Example* Test if a player has scored 10 points or less.\n\n r.table('players').get(1)['score'].le(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] <= 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest.\n\n a = 20\n b = 10\n c = 15\n r.le(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.le(a, b).and(r.le(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.__lt__, b"value.lt(value[, value, ...]) -> bool\nvalue < value -> bool\n\nCompare values, testing if the left-hand value is less than the right-hand.\n\n*Example* Test if a player has scored less than 10 points.\n\n r.table('players').get(1)['score'].lt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] < 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest, with no values being equal to one another.\n\n a = 20\n b = 10\n c = 15\n r.lt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.lt(a, b).and(r.lt(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.lt, b"value.lt(value[, value, ...]) -> bool\nvalue < value -> bool\n\nCompare values, testing if the left-hand value is less than the right-hand.\n\n*Example* Test if a player has scored less than 10 points.\n\n r.table('players').get(1)['score'].lt(10).run(conn)\n # alternative syntax\n (r.table('players').get(1)['score'] < 10).run(conn)\n\n*Example* Test if variables are ordered from highest to lowest, with no values being equal to one another.\n\n a = 20\n b = 10\n c = 15\n r.lt(a, b, c).run(conn)\n\nThis is the equivalent of the following:\n\n r.lt(a, b).and(r.lt(b, c)).run(conn)\n"),
(rethinkdb.ast.RqlQuery.__mod__, b"number % number -> number\n\nFind the remainder when dividing two numbers.\n\n*Example* It's as easy as 2 % 2 = 0.\n\n (r.expr(2) % 2).run(conn)\n\n`\n"),
(rethinkdb.mod, b"number % number -> number\n\nFind the remainder when dividing two numbers.\n\n*Example* It's as easy as 2 % 2 = 0.\n\n (r.expr(2) % 2).run(conn)\n\n`\n"),
(rethinkdb.ast.RqlQuery.__mul__, b'number * number -> number\narray * number -> array\nnumber.mul(number[, number, ...]) -> number\narray.mul(number[, number, ...]) -> array\n\nMultiply two numbers, or make a periodic array.\n\n*Example* It\'s as easy as 2 * 2 = 4.\n\n (r.expr(2) * 2).run(conn)\n\n*Example* Arrays can be multiplied by numbers as well.\n\n (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n'),
(rethinkdb.mul, b'number * number -> number\narray * number -> array\nnumber.mul(number[, number, ...]) -> number\narray.mul(number[, number, ...]) -> array\n\nMultiply two numbers, or make a periodic array.\n\n*Example* It\'s as easy as 2 * 2 = 4.\n\n (r.expr(2) * 2).run(conn)\n\n*Example* Arrays can be multiplied by numbers as well.\n\n (r.expr(["This", "is", "the", "song", "that", "never", "ends."]) * 100).run(conn)\n\n'),
(rethinkdb.ast.RqlQuery.__ne__, b"value.ne(value[, value, ...]) -> bool\nvalue != value -> bool\n\nTest if two or more values are not equal.\n\n*Example* See if a user's `role` field is not set to `administrator`. \n\n r.table('users').get(1)['role'].ne('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] != 'administrator').run(conn)\n\n*Example* See if three variables do not contain equal values.\n\n r.ne(a, b, c).run(conn)\n"),
(rethinkdb.ast.RqlQuery.ne, b"value.ne(value[, value, ...]) -> bool\nvalue != value -> bool\n\nTest if two or more values are not equal.\n\n*Example* See if a user's `role` field is not set to `administrator`. \n\n r.table('users').get(1)['role'].ne('administrator').run(conn)\n # alternative syntax\n (r.table('users').get(1)['role'] != 'administrator').run(conn)\n\n*Example* See if three variables do not contain equal values.\n\n r.ne(a, b, c).run(conn)\n"),
(rethinkdb.ast.RqlQuery.__invert__, b'bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a "flag" field.\n\n r.table(\'users\').filter(\n lambda users: (~users.has_fields(\'flag\'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table(\'users\').filter(\n lambda users: r.not_(users.has_fields(\'flag\'))\n ).run(conn)\n'),
(rethinkdb.ast.RqlQuery.not_, b'bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a "flag" field.\n\n r.table(\'users\').filter(\n lambda users: (~users.has_fields(\'flag\'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table(\'users\').filter(\n lambda users: r.not_(users.has_fields(\'flag\'))\n ).run(conn)\n'),
(rethinkdb.not_, b'bool.not_() -> bool\nnot_(bool) -> bool\n(~bool) -> bool\n\nCompute the logical inverse (not) of an expression.\n\n`not_` can be called either via method chaining, immediately after an expression that evaluates as a boolean value, or by passing the expression as a parameter to `not_`. All values that are not `False` or `None` will be converted to `True`.\n\nYou may also use `~` as a shorthand operator.\n\n*Example* Not true is false.\n\n r.not_(True).run(conn)\n r.expr(True).not_().run(conn)\n (~r.expr(True)).run(conn)\n\nThese evaluate to `false`.\n\nNote that when using `~` the expression is wrapped in parentheses. Without this, Python will evaluate `r.expr(True)` *first* rather than using the ReQL operator and return an incorrect value. (`~True` evaluates to −2 in Python.)\n\n*Example* Return all the users that do not have a "flag" field.\n\n r.table(\'users\').filter(\n lambda users: (~users.has_fields(\'flag\'))\n ).run(conn)\n\n*Example* As above, but prefix-style.\n\n r.table(\'users\').filter(\n lambda users: r.not_(users.has_fields(\'flag\'))\n ).run(conn)\n'),
(rethinkdb.ast.RqlQuery.__or__, b'bool | bool -> bool\nbool.or_([bool, bool, ...]) -> bool\nr.or_([bool, bool, ...]) -> bool\n\nCompute the logical "or" of one or more values. The `or_` command can be used as an infix operator after its first argument (`r.expr(True).or_(False)`) or given all of its arguments as parameters (`r.or_(True, False)`). The standard Python or operator, `|`, may also be used with ReQL.\n\nCalling `or_` with zero arguments will return `False`.\n\n*Example* Return whether either `a` or `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) | b).run(conn)\n \n True\n\n*Example* Return whether any of `x`, `y` or `z` evaluate to true.\n\n > x = False\n > y = False\n > z = False\n > r.or_(x, y, z).run(conn)\n \n False\n\n__Note:__ When using `or` inside a `filter` predicate to test the values of fields that may not exist on the documents being tested, you should use the `default` command with those fields so they explicitly return `False`.\n\n r.table(\'posts\').filter(lambda post:\n post[\'category\'].default(\'foo\').eq(\'article\').or(\n post[\'genre\'].default(\'foo\').eq(\'mystery\'))\n ).run(conn)\n'),
(rethinkdb.or_, b'bool | bool -> bool\nbool.or_([bool, bool, ...]) -> bool\nr.or_([bool, bool, ...]) -> bool\n\nCompute the logical "or" of one or more values. The `or_` command can be used as an infix operator after its first argument (`r.expr(True).or_(False)`) or given all of its arguments as parameters (`r.or_(True, False)`). The standard Python or operator, `|`, may also be used with ReQL.\n\nCalling `or_` with zero arguments will return `False`.\n\n*Example* Return whether either `a` or `b` evaluate to true.\n\n > a = True\n > b = False\n > (r.expr(a) | b).run(conn)\n \n True\n\n*Example* Return whether any of `x`, `y` or `z` evaluate to true.\n\n > x = False\n > y = False\n > z = False\n > r.or_(x, y, z).run(conn)\n \n False\n\n__Note:__ When using `or` inside a `filter` predicate to test the values of fields that may not exist on the documents being tested, you should use the `default` command with those fields so they explicitly return `False`.\n\n r.table(\'posts\').filter(lambda post:\n post[\'category\'].default(\'foo\').eq(\'article\').or(\n post[\'genre\'].default(\'foo\').eq(\'mystery\'))\n ).run(conn)\n'),
(rethinkdb.random, b"r.random() -> number\nr.random(number[, number], float=True) -> number\nr.random(integer[, integer]) -> integer\n\nGenerate a random number between given (or implied) bounds. `random` takes zero, one or two arguments.\n\n- With __zero__ arguments, the result will be a floating-point number in the range `[0,1)` (from 0 up to but not including 1).\n- With __one__ argument _x,_ the result will be in the range `[0,x)`, and will be integer unless `float=True` is given as an option. Specifying a floating point number without the `float` option will raise an error.\n- With __two__ arguments _x_ and _y,_ the result will be in the range `[x,y)`, and will be integer unless `float=True` is given as an option. If _x_ and _y_ are equal an error will occur, unless the floating-point option has been specified, in which case _x_ will be returned. Specifying a floating point number without the `float` option will raise an error.\n\nNote: The last argument given will always be the 'open' side of the range, but when\ngenerating a floating-point number, the 'open' side may be less than the 'closed' side.\n\n*Example* Generate a random number in the range `[0,1)`\n\n r.random().run(conn)\n\n*Example* Generate a random integer in the range `[0,100)`\n\n r.random(100).run(conn)\n r.random(0, 100).run(conn)\n\n*Example* Generate a random number in the range `(-2.24,1.59]`\n\n r.random(1.59, -2.24, float=True).run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.round, b"r.round(number) -> number\nnumber.round() -> number\n\nRounds the given value to the nearest whole integer. For example, values of 1.0 up to but not including 1.5 will return 1.0, similar to floor; values of 1.5 up to 2.0 will return 2.0, similar to ceil.\n\n*Example* Round 12.345 to the nearest integer.\n\n > r.round(12.345).run(conn)\n \n 12.0\n\nThe `round` command can also be chained after an expression.\n\n*Example* Round -12.345 to the nearest integer.\n\n > r.expr(-12.345).round().run(conn)\n \n -12.0\n\n*Example* Return Iron Man's weight, rounded to the nearest integer.\n\n r.table('superheroes').get('ironman')['weight'].round().run(conn)\n"),
(rethinkdb.ast.RqlQuery.__sub__, b"number - number -> number\ntime - number -> time\ntime - time -> number\nnumber.sub(number[, number, ...]) -> number\ntime.sub(number[, number, ...]) -> time\ntime.sub(time) -> number\n\nSubtract two numbers.\n\n*Example* It's as easy as 2 - 2 = 0.\n\n (r.expr(2) - 2).run(conn)\n\n*Example* Create a date one year ago today.\n\n r.now() - 365*24*60*60\n\n*Example* Retrieve how many seconds elapsed between today and `date`.\n\n r.now() - date\n\n"),
(rethinkdb.sub, b"number - number -> number\ntime - number -> time\ntime - time -> number\nnumber.sub(number[, number, ...]) -> number\ntime.sub(number[, number, ...]) -> time\ntime.sub(time) -> number\n\nSubtract two numbers.\n\n*Example* It's as easy as 2 - 2 = 0.\n\n (r.expr(2) - 2).run(conn)\n\n*Example* Create a date one year ago today.\n\n r.now() - 365*24*60*60\n\n*Example* Retrieve how many seconds elapsed between today and `date`.\n\n r.now() - date\n\n"),
(rethinkdb.ast.Table.between, b'table.between(lower_key, upper_key[, options]) -> table_slice\ntable_slice.between(lower_key, upper_key[, options]) -> table_slice\n\nGet all documents between two keys. Accepts three optional arguments: `index`, `left_bound`, and `right_bound`. If `index` is set to the name of a secondary index, `between` will return all documents where that index\'s value is in the specified range (it uses the primary key by default). `left_bound` or `right_bound` may be set to `open` or `closed` to indicate whether or not to include that endpoint of the range (by default, `left_bound` is closed and `right_bound` is open).\n\nYou may also use the special constants `r.minval` and `r.maxval` for boundaries, which represent "less than any index key" and "more than any index key" respectively. For instance, if you use `r.minval` as the lower key, then `between` will return all documents whose primary keys (or indexes) are less than the specified upper key.\n\nIf you use arrays as indexes (compound indexes), they will be sorted using lexicographical order. Take the following range as an example:\n\n\t[[1, "c"] ... 
[5, "e"]]\n\nThis range includes all compound keys:\n\n* whose first item is 1 and second item is equal or greater than "c";\n* whose first item is between 1 and 5, *regardless of the value of the second item*;\n* whose first item is 5 and second item is less than or equal to "e".\n\n*Example* Find all users with primary key >= 10 and < 20 (a normal half-open interval).\n\n r.table(\'marvel\').between(10, 20).run(conn)\n\n*Example* Find all users with primary key >= 10 and <= 20 (an interval closed on both sides).\n\n r.table(\'marvel\').between(10, 20, right_bound=\'closed\').run(conn)\n\n*Example* Find all users with primary key < 20.\n\n r.table(\'marvel\').between(r.minval, 20).run(conn)\n\n*Example* Find all users with primary key > 10.\n\n r.table(\'marvel\').between(10, r.maxval, left_bound=\'open\').run(conn)\n\n*Example* Between can be used on secondary indexes too. Just pass an optional index argument giving the secondary index to query.\n\n r.table(\'dc\').between(\'dark_knight\', \'man_of_steel\', index=\'code_name\').run(conn)\n\n*Example* Get all users whose full name is between "John Smith" and "Wade Welles."\n\n r.table("users").between(["Smith", "John"], ["Welles", "Wade"],\n index="full_name").run(conn)\n\n*Example* Get the top 10 ranked teams in order.\n\n r.table("teams").order_by(index="rank").between(1, 11).run(conn)\n\n__Note:__ When `between` is chained after [order_by](http://rethinkdb.com/api/python/order_by), both commands must use the same index; `between` will default to the index `order_by` is using, so in this example `"rank"` is automatically being used by `between`. Trying to specify another index will result in a `ReqlRuntimeError`.\n\n*Example* Subscribe to a [changefeed](http://rethinkdb.com/docs/changefeeds/python) of teams ranked in the top 10.\n\n changes = r.table("teams").between(1, 11, index="rank").changes().run(conn)\n\n'),
(rethinkdb.db, b"r.db(db_name) -> db\n\nReference a database.\n\nThe `db` command is optional. If it is not present in a query, the query will run against the database specified in the `db` argument given to [run](http://rethinkdb.com/api/python/run) if one was specified. Otherwise, the query will run against the default database for the connection, specified in the `db` argument to [connect](http://rethinkdb.com/api/python/connect).\n\n*Example* Explicitly specify a database for a query.\n\n r.db('heroes').table('marvel').run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.filter, b'selection.filter(predicate_function[, default=False]) -> selection\nstream.filter(predicate_function[, default=False]) -> stream\narray.filter(predicate_function[, default=False]) -> array\n\nReturn all the elements in a sequence for which the given predicate is true. The return value of `filter` will be the same as the input (sequence, stream, or array). Documents can be filtered in a variety of ways—ranges, nested values, boolean conditions, and the results of anonymous functions.\n\nBy default, `filter` will silently skip documents with missing fields: if the predicate tries to access a field that doesn\'t exist (for instance, the predicate `{\'age\': 30}` applied to a document with no `age` field), that document will not be returned in the result set, and no error will be generated. This behavior can be changed with the `default` optional argument.\n\n* If `default` is set to `True`, documents with missing fields will be returned rather than skipped.\n* If `default` is set to `r.error()`, an `ReqlRuntimeError` will be thrown when a document with a missing field is tested.\n* If `default` is set to `False` (the default), documents with missing fields will be skipped.\n\n*Example* Get all users who are 30 years old.\n\n r.table(\'users\').filter({\'age\': 30}).run(conn)\n\nThe predicate `{\'age\': 30}` selects documents in the `users` table with an `age` field whose value is `30`. 
Documents with an `age` field set to any other value *or* with no `age` field present are skipped.\n\nWhile the `{\'field\': value}` style of predicate is useful for exact matches, a more general way to write a predicate is to use the [row](http://rethinkdb.com/api/python/row) command with a comparison operator such as [eq](http://rethinkdb.com/api/python/eq) (`==`) or [gt](http://rethinkdb.com/api/python/gt) (`>`), or to use a lambda function that returns `True` or `False`.\n\n r.table(\'users\').filter(r.row["age"] == 30).run(conn)\n\nIn this case, the predicate `r.row["age"] == 30` returns `True` if the field `age` is equal to 30. You can write this predicate as a lambda function instead:\n\n r.table(\'users\').filter(lambda user:\n user["age"] == 30\n ).run(conn)\n\nPredicates to `filter` are evaluated on the server, and must use ReQL expressions. Some Python comparison operators are overloaded by the RethinkDB driver and will be translated to ReQL, such as `==`, `<`/`>` and `|`/`&` (note the single character form, rather than `||`/`&&`).\n\nAlso, predicates must evaluate document fields. 
They cannot evaluate [secondary indexes](http://rethinkdb.com/docs/secondary-indexes/).\n\n*Example* Get all users who are more than 18 years old.\n\n r.table("users").filter(r.row["age"] > 18).run(conn)\n\n*Example* Get all users who are less than 18 years old and more than 13 years old.\n\n r.table("users").filter((r.row["age"] < 18) & (r.row["age"] > 13)).run(conn)\n\n*Example* Get all users who are more than 18 years old or have their parental consent.\n\n r.table("users").filter(\n (r.row["age"] >= 18) | (r.row["hasParentalConsent"])).run(conn)\n\n*Example* Retrieve all users who subscribed between January 1st, 2012\n(included) and January 1st, 2013 (excluded).\n\n r.table("users").filter(\n lambda user: user["subscription_date"].during(\n r.time(2012, 1, 1, \'Z\'), r.time(2013, 1, 1, \'Z\'))\n ).run(conn)\n\n*Example* Retrieve all users who have a gmail account (whose field `email` ends with `@gmail.com`).\n\n r.table("users").filter(\n lambda user: user["email"].match("@gmail.com$")\n ).run(conn)\n\n*Example* Filter based on the presence of a value in an array.\n\nGiven this schema for the `users` table:\n\n {\n "name": <type \'str\'>\n "places_visited": [<type \'str\'>]\n }\n\nRetrieve all users whose field `places_visited` contains `France`.\n\n r.table("users").filter(lambda user:\n user["places_visited"].contains("France")\n ).run(conn)\n\n*Example* Filter based on nested fields.\n\nGiven this schema for the `users` table:\n\n {\n "id": <type \'str\'>\n "name": {\n "first": <type \'str\'>,\n "middle": <type \'str\'>,\n "last": <type \'str\'>\n }\n }\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), with any middle name.\n\n r.table("users").filter({\n "name": {\n "first": "William",\n "last": "Adama"\n }\n }).run(conn)\n\nIf you want an exact match for a field that is an object, you will have to use `r.literal`.\n\nRetrieve all users named "William Adama" (first name "William", last name\n"Adama"), and who do not 
have a middle name.\n\n r.table("users").filter(r.literal({\n "name": {\n "first": "William",\n "last": "Adama"\n }\n })).run(conn)\n\nYou may rewrite these with lambda functions.\n\n r.table("users").filter(\n lambda user:\n (user["name"]["first"] == "William")\n & (user["name"]["last"] == "Adama")\n ).run(conn)\n\n r.table("users").filter(lambda user:\n user["name"] == {\n "first": "William",\n "last": "Adama"\n }\n ).run(conn)\n\nBy default, documents missing fields tested by the `filter` predicate are skipped. In the previous examples, users without an `age` field are not returned. By passing the optional `default` argument to `filter`, you can change this behavior.\n\n*Example* Get all users less than 18 years old or whose `age` field is missing.\n\n r.table("users").filter(r.row["age"] < 18, default=True).run(conn)\n\n*Example* Get all users more than 18 years old. Throw an error if a\ndocument is missing the field `age`.\n\n r.table("users").filter(r.row["age"] > 18, default=r.error()).run(conn)\n\n*Example* Get all users who have given their phone number (all the documents whose field `phone_number` exists and is not `None`).\n\n r.table(\'users\').filter(\n lambda user: user.has_fields(\'phone_number\')\n ).run(conn)\n\n*Example* Get all users with an "editor" role or an "admin" privilege.\n\n r.table(\'users\').filter(\n lambda user: (user[\'role\'] == \'editor\').default(False) |\n (user[\'privilege\'] == \'admin\').default(False)\n ).run(conn)\n\nInstead of using the `default` optional argument to `filter`, we have to use default values on the fields within the `or` clause. Why? If the field on the left side of the `or` clause is missing from a document—in this case, if the user doesn\'t have a `role` field—the predicate will generate an error, and will return `False` (or the value the `default` argument is set to) without evaluating the right side of the `or`. 
By using `.default(False)` on the fields, each side of the `or` will evaluate to either the field\'s value or `False` if the field doesn\'t exist.\n'),
(rethinkdb.ast.Table.get, b"table.get(key) -> singleRowSelection\n\nGet a document by primary key.\n\nIf no document exists with that primary key, `get` will return `None`.\n\n*Example* Find a document by UUID.\n\n    r.table('posts').get('a9849eef-7176-4411-935b-79a6e3c56a74').run(conn)\n\n*Example* Find a document and merge another document with it.\n\n    r.table('heroes').get(3).merge(\n        { 'powers': ['invisibility', 'speed'] }\n    ).run(conn)\n\n*Example* Subscribe to a document's [changefeed](http://rethinkdb.com/docs/changefeeds/python).\n\n    changes = r.table('heroes').get(3).changes().run(conn)\n"),
(rethinkdb.ast.Table.get_all, b"table.get_all(key1[, key2...], [, index='id']) -> selection\n\nGet all documents where the given value matches the value of the requested index.\n\n*Example* Secondary index keys are not guaranteed to be unique so we cannot query via [get](http://rethinkdb.com/api/python/get/) when using a secondary index.\n\n    r.table('marvel').get_all('man_of_steel', index='code_name').run(conn)\n\n*Example* Without an index argument, we default to the primary index. While `get` will either return the document or `None` when no document with such a primary key value exists, this will return either a one or zero length stream.\n\n    r.table('dc').get_all('superman').run(conn)\n\n*Example* You can get multiple documents in a single call to `get_all`.\n\n    r.table('dc').get_all('superman', 'ant man').run(conn)\n\n*Example* You can use [args](http://rethinkdb.com/api/python/args/) with `get_all` to retrieve multiple documents whose keys are in a list. This uses `get_all` to get a list of female superheroes, coerces that to an array, and then gets a list of villains who have those superheroes as enemies.\n\n    r.do(\n        r.table('heroes').get_all('f', {'index': 'gender'})['id'].coerce_to('array'), \n        lambda heroines: r.table('villains').get_all(r.args(heroines))\n    ).run(conn)\n\nSecondary indexes can be used in extremely powerful ways with `get_all` and other commands; read the full article on [secondary indexes](http://rethinkdb.com/docs/secondary-indexes) for examples using boolean operations, `contains` and more.\n"),
(rethinkdb.ast.DB.table, b"db.table(name[, read_mode='single', identifier_format='name']) -> table\n\nReturn all documents in a table. Other commands may be chained after `table` to return a subset of documents (such as [get](http://rethinkdb.com/api/python/get/) and [filter](http://rethinkdb.com/api/python/filter/)) or perform further processing.\n\n*Example* Return all documents in the table 'marvel' of the default database.\n\n r.table('marvel').run(conn)\n\n*Example* Return all documents in the table 'marvel' of the database 'heroes'.\n\n r.db('heroes').table('marvel').run(conn)\n\nThere are two optional arguments.\n\n* `read_mode`: One of three possible values affecting the consistency guarantee for the table read:\n * `single` returns values that are in memory (but not necessarily written to disk) on the primary replica. This is the default.\n * `majority` will only return values that are safely committed on disk on a majority of replicas. This requires sending a message to every replica on each read, so it is the slowest but most consistent.\n * `outdated` will return values that are in memory on an arbitrarily-selected replica. This is the fastest but least consistent.\n* `identifier_format`: possible values are `name` and `uuid`, with a default of `name`. If set to `uuid`, then [system tables](http://rethinkdb.com/docs/system-tables/) will refer to servers, databases and tables by UUID rather than name. (This only has an effect when used with system tables.)\n\n*Example* Allow potentially out-of-date data in exchange for faster reads.\n\n r.db('heroes').table('marvel', read_mode='outdated').run(conn)\n"),
(rethinkdb.ast.RqlQuery.downcase, b'string.downcase() -> string\n\nLowercases a string.\n\n*Example*\n\n > r.expr("Sentence about LaTeX.").downcase().run(conn)\n "sentence about latex."\n\n__Note:__ `upcase` and `downcase` only affect ASCII characters.\n'),
(rethinkdb.ast.RqlQuery.match, b'string.match(regexp) -> None/object\n\nMatches against a regular expression. If there is a match, returns an object with the fields:\n\n- `str`: The matched string\n- `start`: The matched string\'s start\n- `end`: The matched string\'s end\n- `groups`: The capture groups defined with parentheses\n\nIf no match is found, returns `None`.\n\nAccepts RE2 syntax\n([https://code.google.com/p/re2/wiki/Syntax](https://code.google.com/p/re2/wiki/Syntax)).\nYou can enable case-insensitive matching by prefixing the regular expression with\n`(?i)`. See the linked RE2 documentation for more flags.\n\nThe `match` command does not support backreferences.\n\n*Example* Get all users whose name starts with "A". Because `None` evaluates to `false` in\n[filter](http://rethinkdb.com/api/python/filter/), you can just use the result of `match` for the predicate.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("^A")\n ).run(conn)\n\n*Example* Get all users whose name ends with "n".\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("n$")\n ).run(conn)\n\n*Example* Get all users whose name has "li" in it\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("li")\n ).run(conn)\n\n*Example* Get all users whose name is "John" with a case-insensitive search.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("(?i)^john$")\n ).run(conn)\n\n*Example* Get all users whose name is composed of only characters between "a" and "z".\n\n r.table(\'users\').filter(lambda doc:\n doc[\'name\'].match("(?i)^[a-z]+$")\n ).run(conn)\n\n*Example* Get all users where the zipcode is a string of 5 digits.\n\n r.table(\'users\').filter(lambda doc:\n doc[\'zipcode\'].match("\\d{5}")\n ).run(conn)\n\n*Example* Retrieve the domain of a basic email\n\n r.expr("name@domain.com").match(".*@(.*)").run(conn)\n\nResult:\n\n {\n "start": 0,\n "end": 20,\n "str": "name@domain.com",\n "groups":[\n {\n "end": 17,\n "start": 7,\n "str": 
"domain.com"\n }\n ]\n }\n\nYou can then retrieve only the domain with the [\\[\\]](http://rethinkdb.com/api/python/get_field) selector.\n\n r.expr("name@domain.com").match(".*@(.*)")["groups"][0]["str"].run(conn)\n\nReturns `\'domain.com\'`\n\n*Example* Fail to parse out the domain and returns `None`.\n\n r.expr("name[at]domain.com").match(".*@(.*)").run(conn)\n'),
(rethinkdb.ast.RqlQuery.split, b'string.split([separator, [max_splits]]) -> array\n\nSplits a string into substrings. Splits on whitespace when called\nwith no arguments. When called with a separator, splits on that\nseparator. When called with a separator and a maximum number of\nsplits, splits on that separator at most `max_splits` times. (Can be\ncalled with `None` as the separator if you want to split on whitespace\nwhile still specifying `max_splits`.)\n\nMimics the behavior of Python\'s `string.split` in edge cases, except\nfor splitting on the empty string, which instead produces an array of\nsingle-character strings.\n\n*Example* Split on whitespace.\n\n > r.expr("foo bar bax").split().run(conn)\n ["foo", "bar", "bax"]\n\n*Example* Split the entries in a CSV file.\n\n > r.expr("12,37,,22,").split(",").run(conn)\n ["12", "37", "", "22", ""]\n\n*Example* Split a string into characters.\n\n > r.expr("mlucy").split("").run(conn)\n ["m", "l", "u", "c", "y"]\n\n*Example* Split the entries in a CSV file, but only at most 3\ntimes.\n\n > r.expr("12,37,,22,").split(",", 3).run(conn)\n ["12", "37", "", "22,"]\n\n*Example* Split on whitespace at most once (i.e. get the first word).\n\n > r.expr("foo bar bax").split(None, 1).run(conn)\n ["foo", "bar bax"]\n'),
(rethinkdb.ast.RqlQuery.upcase, b'string.upcase() -> string\n\nUppercases a string.\n\n*Example*\n\n > r.expr("Sentence about LaTeX.").upcase().run(conn)\n "SENTENCE ABOUT LATEX."\n\n__Note:__ `upcase` and `downcase` only affect ASCII characters.\n'),
(rethinkdb.ast.RqlQuery.concat_map, b'stream.concat_map(function) -> stream\narray.concat_map(function) -> array\n\nConcatenate one or more elements into a single sequence using a mapping function.\n\n`concat_map` works in a similar fashion to [map](http://rethinkdb.com/api/python/map/), applying the given function to each element in a sequence, but it will always return a single sequence. If the mapping function returns a sequence, `map` would produce a sequence of sequences:\n\n r.expr([1, 2, 3]).map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n [[1, 2], [2, 4], [3, 6]]\n\nWhereas `concat_map` with the same mapping function would merge those sequences into one:\n\n r.expr([1, 2, 3]).concat_map(lambda x: [x, x.mul(2)]).run(conn)\n\nResult:\n\n [1, 2, 2, 4, 3, 6]\n\nThe return value, array or stream, will be the same type as the input.\n\n*Example* Construct a sequence of all monsters defeated by Marvel heroes. The field "defeatedMonsters" is an array of one or more monster names.\n\n r.table(\'marvel\').concat_map(lambda hero: hero[\'defeatedMonsters\']).run(conn)\n\n*Example* Simulate an [eq_join](http://rethinkdb.com/api/python/eq_join/) using `concat_map`. (This is how ReQL joins are implemented internally.)\n\n r.table(\'posts\').concat_map(\n lambda post: r.table(\'comments\').get_all(\n post[\'id\'], index=\'post_id\'\n ).map(\n lambda comment: { \'left\': post, \'right\': comment}\n )\n ).run(conn)\n'),
(rethinkdb.ast.RqlQuery.is_empty, b"sequence.is_empty() -> bool\n\nTest if a sequence is empty.\n\n*Example* Are there any documents in the marvel table?\n\n r.table('marvel').is_empty().run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.limit, b"sequence.limit(n) -> stream\narray.limit(n) -> array\n\nEnd the sequence after the given number of elements.\n\n*Example* Only so many can fit in our Pantheon of heroes.\n\n r.table('marvel').order_by('belovedness').limit(10).run(conn)\n"),
(rethinkdb.ast.RqlQuery.map, b"sequence1.map([sequence2, ...], function) -> stream\narray1.map([array2, ...], function) -> array\nr.map(sequence1[, sequence2, ...], function) -> stream\nr.map(array1[, array2, ...], function) -> array\n\nTransform each element of one or more sequences by applying a mapping function to them. If `map` is run with two or more sequences, it will iterate for as many items as there are in the shortest sequence.\n\nNote that `map` can only be applied to sequences, not single values. If you wish to apply a function to a single value/selection (including an array), use the [do](http://rethinkdb.com/api/python/do) command.\n\n*Example* Return the first five squares.\n\n > r.expr([1, 2, 3, 4, 5]).map(lambda val: (val * val)).run(conn)\n \n [1, 4, 9, 16, 25]\n\n*Example* Sum the elements of three sequences.\n\n > sequence1 = [100, 200, 300, 400]\n > sequence2 = [10, 20, 30, 40]\n > sequence3 = [1, 2, 3, 4]\n > r.map(sequence1, sequence2, sequence3,\n lambda val1, val2, val3: (val1 + val2 + val3)).run(conn)\n \n [111, 222, 333, 444]\n\n*Example* Rename a field when retrieving documents using `map` and [merge](http://rethinkdb.com/api/python/merge/).\n\nThis example renames the field `id` to `user_id` when retrieving documents from the table `users`.\n\n r.table('users').map(\n lambda doc: doc.merge({'user_id': doc['id']}).without('id')).run(conn)\n\nNote that in this case, [row](http://rethinkdb.com/api/python/row) may be used as an alternative to writing an anonymous function, as it returns the same value as the function parameter receives:\n\n r.table('users').map(\n r.row.merge({'user_id': r.row['id']}).without('id')).run(conn)\n\n*Example* Assign every superhero an archenemy.\n\n r.table('heroes').map(r.table('villains'),\n lambda hero, villain: hero.merge({'villain': villain})).run(conn)\n"),
(rethinkdb.ast.RqlQuery.nth, b"sequence.nth(index) -> object\nselection.nth(index) -> selection<object>\n\nGet the *nth* element of a sequence, counting from zero. If the argument is negative, count from the last element.\n\n*Example* Select the second element in the array.\n\n r.expr([1,2,3]).nth(1).run(conn)\n r.expr([1,2,3])[1].run(conn)\n\n*Example* Select the bronze medalist from the competitors.\n\n r.table('players').order_by(index=r.desc('score')).nth(3).run(conn)\n\n*Example* Select the last place competitor.\n\n r.table('players').order_by(index=r.desc('score')).nth(-1).run(conn)\n"),
(rethinkdb.ast.RqlQuery.offsets_of, b"sequence.offsets_of(datum | predicate_function) -> array\n\nGet the indexes of an element in a sequence. If the argument is a predicate, get the indexes of all elements matching it.\n\n*Example* Find the position of the letter 'c'.\n\n r.expr(['a','b','c']).offsets_of('c').run(conn)\n\n*Example* Find the popularity ranking of invisible heroes.\n\n r.table('marvel').union(r.table('dc')).order_by('popularity').offsets_of(\n r.row['superpowers'].contains('invisibility')\n ).run(conn)\n\n"),
(rethinkdb.ast.Table.order_by, b'table.order_by([key | function], index=index_name) -> table_slice\nselection.order_by(key | function[, ...]) -> selection<array>\nsequence.order_by(key | function[, ...]) -> array\n\nSort the sequence by document values of the given key(s). To specify\nthe ordering, wrap the attribute with either `r.asc` or `r.desc`\n(defaults to ascending).\n\n__Note:__ RethinkDB uses byte-wise ordering for `orderBy` and does not support Unicode collations; non-ASCII characters will be sorted by UTF-8 codepoint. For more information on RethinkDB\'s sorting order, read the section in [ReQL data types](http://rethinkdb.com/docs/data-types/#sorting-order).\n\nSorting without an index requires the server to hold the sequence in\nmemory, and is limited to 100,000 documents (or the setting of the `arrayLimit` option for [run](http://rethinkdb.com/api/python/run)). Sorting with an index can\nbe done on arbitrarily large tables, or after a [between](http://rethinkdb.com/api/python/between/) command\nusing the same index. This applies to both secondary indexes and the primary key (e.g., `index=\'id\'`).\n\n*Example* Order all the posts using the index `date`. 
\n\n    r.table(\'posts\').order_by(index=\'date\').run(conn)\n\nThe index must either be the primary key or have been previously created with [index_create](http://rethinkdb.com/api/python/index_create/).\n\n    r.table(\'posts\').index_create(\'date\').run(conn)\n\nYou can also select a descending ordering:\n\n    r.table(\'posts\').order_by(index=r.desc(\'date\')).run(conn)\n\n*Example* Order a sequence without an index.\n\n    r.table(\'posts\').get(1)[\'comments\'].order_by(\'date\')\n\nYou can also select a descending ordering:\n\n    r.table(\'posts\').get(1)[\'comments\'].order_by(r.desc(\'date\'))\n\nIf you\'re doing ad-hoc analysis and know your table won\'t have more than 100,000\nelements (or you\'ve changed the setting of the `arrayLimit` option for [run](http://rethinkdb.com/api/python/run)) you can run `order_by` without an index:\n\n    r.table(\'small_table\').order_by(\'date\')\n\n*Example* You can efficiently order using multiple fields by using a\n[compound index](http://www.rethinkdb.com/docs/secondary-indexes/python/).\n\nOrder by date and title.\n\n    r.table(\'posts\').order_by(index=\'date_and_title\').run(conn)\n\nThe index must either be the primary key or have been previously created with [index_create](http://rethinkdb.com/api/python/index_create/).\n\n    r.table(\'posts\').index_create(\'date_and_title\', lambda post:\n        [post["date"], post["title"]]).run(conn)\n\n_Note_: You cannot specify multiple orders in a compound index. See [issue #2306](https://github.com/rethinkdb/rethinkdb/issues/2306)\nto track progress.\n\n*Example* If you have a sequence with fewer documents than the `array_limit`, you can order it\nby multiple fields without an index.\n\n    r.table(\'small_table\').order_by(\'date\', r.desc(\'title\'))\n\n*Example* Notice that an index ordering always has highest\nprecedence. 
The following query orders posts by date, and if multiple\nposts were published on the same date, they will be ordered by title.\n\n r.table(\'post\').order_by(\'title\', index=\'date\').run(conn)\n*Example* You can use [nested field](http://rethinkdb.com/docs/cookbook/python/#filtering-based-on-nested-fields) syntax to sort on fields from subdocuments. (You can also create indexes on nested fields using this syntax with `index_create`.)\n\n r.table(\'user\').order_by(lambda user: user[\'group\'][\'id\']).run(conn)\n\n*Example* You can efficiently order data on arbitrary expressions using indexes.\n\n r.table(\'posts\').order_by(index=\'votes\').run(conn)\n\nThe index must have been previously created with [index_create](http://rethinkdb.com/api/ruby/index_create/).\n\n r.table(\'posts\').index_create(\'votes\', lambda post:\n post["upvotes"]-post["downvotes"]\n ).run(conn)\n\n*Example* If you have a sequence with fewer documents than the `array_limit`, you can order it with an arbitrary function directly.\n\n r.table(\'small_table\').order_by(lambda doc:\n doc[\'upvotes\']-doc[\'downvotes\']\n );\n\nYou can also select a descending ordering:\n\n r.table(\'small_table\').order_by(r.desc(lambda doc:\n doc[\'upvotes\']-doc[\'downvotes\']\n ));\n\n*Example* Ordering after a `between` command can be done as long as the same index is being used.\n\n r.table("posts").between(r.time(2013, 1, 1, \'+00:00\'), r.time(2013, 1, 1, \'+00:00\'), index=\'date\')\n .order_by(index=\'date\').run(conn);\n\n'),
(rethinkdb.ast.RqlQuery.sample, b"sequence.sample(number) -> selection\nstream.sample(number) -> array\narray.sample(number) -> array\n\nSelect a given number of elements from a sequence with uniform random distribution. Selection is done without replacement.\n\nIf the sequence has less than the requested number of elements (i.e., calling `sample(10)` on a sequence with only five elements), `sample` will return the entire sequence in a random order.\n\n*Example* Select 3 random heroes.\n\n r.table('marvel').sample(3).run(conn)\n"),
(rethinkdb.ast.RqlQuery.skip, b"sequence.skip(n) -> stream\narray.skip(n) -> array\n\nSkip a number of elements from the head of the sequence.\n\n*Example* Here in conjunction with [order_by](http://rethinkdb.com/api/python/order_by/) we choose to ignore the most successful heroes.\n\n r.table('marvel').order_by('successMetric').skip(10).run(conn)\n\n"),
(rethinkdb.ast.RqlQuery.slice, b"selection.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> selection\nstream.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> stream\narray.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> array\nbinary.slice(start_index[, end_index, left_bound='closed', right_bound='open']) -> binary\n\nReturn the elements of a sequence within the specified range.\n\n`slice` returns the range between `start_index` and `end_index`. If only `start_index` is specified, `slice` returns the range from that index to the end of the sequence. Specify `left_bound` or `right_bound` as `open` or `closed` to indicate whether to include that endpoint of the range by default: `closed` returns that endpoint, while `open` does not. By default, `left_bound` is closed and `right_bound` is open, so the range `(10,13)` will return the tenth, eleventh and twelfth elements in the sequence.\n\nIf `end_index` is past the end of the sequence, all elements from `start_index` to the end of the sequence will be returned. If `start_index` is past the end of the sequence or `end_index` is less than `start_index`, a zero-element sequence will be returned (although see below for negative `end_index` values). An error will be raised on a negative `start_index`.\n\nA negative `end_index` is allowed with arrays; in that case, the returned range counts backward from the array's end. That is, the range of `(2,-1)` returns the second element through the next-to-last element of the range. A negative `end_index` is not allowed with a stream. (An `end_index` of −1 *is* allowed with a stream if `right_bound` is closed; this behaves as if no `end_index` was specified.)\n\nIf `slice` is used with a [binary](http://rethinkdb.com/api/python/binary) object, the indexes refer to byte positions within the object. 
That is, the range `(10,20)` will refer to the 10th byte through the 19th byte.\n\nIf you are only specifying the indexes and not the bounding options, you may use Python's slice operator as a shorthand: `[start_index:end_index]`.\n\n**Example:** Return the fourth, fifth and sixth youngest players. (The youngest player is at index 0, so those are elements 3–5.)\n\n r.table('players').order_by(index='age').slice(3,6).run(conn)\n\nOr, using Python's slice operator:\n\n r.table('players').filter({'class': 'amateur'})[10:20].run(conn)\n\n**Example:** Return all but the top three players who have a red flag.\n\n r.table('players').filter({'flag': 'red'}).order_by(index=r.desc('score')).slice(3).run(conn)\n\n**Example:** Return holders of tickets `X` through `Y`, assuming tickets are numbered sequentially. We want to include ticket `Y`.\n\n r.table('users').order_by(index='ticket').slice(x, y, right_bound='closed').run(conn)\n\n**Example:** Return the elements of an array from the second through two from the end (that is, not including the last two).\n\n r.expr([0,1,2,3,4,5]).slice(2,-2).run(conn)\n\nResult:\n\n [2,3]\n"),
(rethinkdb.ast.RqlQuery.union, b"stream.union(sequence[, sequence, ...]) -> stream\narray.union(sequence[, sequence, ...]) -> array\n\nMerge two or more sequences. (Note that ordering is not guaranteed by `union`.)\n\n*Example* Construct a stream of all heroes.\n\n r.table('marvel').union(r.table('dc')).run(conn)\n\n*Example* Combine four arrays into one.\n\n r.expr([1, 2]).union([3, 4], [5, 6], [7, 8, 9]).run(conn)\n \n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n"),
(rethinkdb.ast.RqlQuery.with_fields, b"sequence.with_fields([selector1, selector2...]) -> stream\narray.with_fields([selector1, selector2...]) -> array\n\nPlucks one or more attributes from a sequence of objects, filtering out any objects in the sequence that do not have the specified fields. Functionally, this is identical to [has_fields](http://rethinkdb.com/api/python/has_fields/) followed by [pluck](http://rethinkdb.com/api/python/pluck/) on a sequence.\n\n*Example* Get a list of users and their posts, excluding any users who have not made any posts.\n\nExisting table structure:\n\n    [\n        { 'id': 1, 'user': 'bob', 'email': 'bob@foo.com', 'posts': [ 1, 4, 5 ] },\n        { 'id': 2, 'user': 'george', 'email': 'george@foo.com' },\n        { 'id': 3, 'user': 'jane', 'email': 'jane@foo.com', 'posts': [ 2, 3, 6 ] }\n    ]\n\nCommand and output:\n\n    r.table('users').with_fields('id', 'user', 'posts').run(conn)\n    \n    [\n        { 'id': 1, 'user': 'bob', 'posts': [ 1, 4, 5 ] },\n        { 'id': 3, 'user': 'jane', 'posts': [ 2, 3, 6 ] }\n    ]\n\n*Example* Use the [nested field syntax](http://rethinkdb.com/docs/nested-fields/) to get a list of users with cell phone numbers in their contacts.\n\n    r.table('users').with_fields('id', 'user', {'contact': {'phone': 'work'}}).run(conn)\n"),
(rethinkdb.ast.Table.delete, b'table.delete([durability="hard", return_changes=False])\n -> object\nselection.delete([durability="hard", return_changes=False])\n -> object\nsingleSelection.delete([durability="hard", return_changes=False])\n -> object\n\nDelete one or more documents from a table.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the\ntable or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). \nIn soft durability mode RethinkDB will acknowledge the write immediately after\nreceiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n\nDelete returns an object that contains the following attributes:\n\n- `deleted`: the number of documents that were deleted.\n- `skipped`: the number of documents that were skipped. \nFor example, if you attempt to delete a batch of documents, and another concurrent query\ndeletes some of those documents first, they will be counted as skipped.\n- `errors`: the number of errors encountered while performing the delete.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `inserted`, `replaced`, and `unchanged`: all 0 for a delete operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `delete` operation. 
Each object will have two keys: `{"new_val": None, "old_val": <old value>}`.\n\n*Example* Delete a single document from the table `comments`.\n\n    r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete().run(conn)\n\n*Example* Delete all documents from the table `comments`.\n\n    r.table("comments").delete().run(conn)\n\n*Example* Delete all comments where the field `id_post` is `3`.\n\n    r.table("comments").filter({"id_post": 3}).delete().run(conn)\n\n*Example* Delete a single document from the table `comments` and return its value.\n\n    r.table("comments").get("7eab9e63-73f1-4f33-8ce4-95cbea626f59").delete(return_changes=True).run(conn)\n\nThe result will look like:\n\n    {\n        "deleted": 1,\n        "errors": 0,\n        "inserted": 0,\n        "changes": [\n            {\n                "new_val": None,\n                "old_val": {\n                    "id": "7eab9e63-73f1-4f33-8ce4-95cbea626f59",\n                    "author": "William",\n                    "comment": "Great post",\n                    "id_post": 3\n                }\n            }\n        ],\n        "replaced": 0,\n        "skipped": 0,\n        "unchanged": 0\n    }\n\n*Example* Delete all documents from the table `comments` without waiting for the\noperation to be flushed to disk.\n\n    r.table("comments").delete(durability="soft").run(conn)\n'),
(rethinkdb.ast.Table.insert, b'table.insert(object | [object1, object2, ...][, durability="hard", return_changes=False, conflict="error"])\n -> object\n\nInsert documents into a table. Accepts a single document or an array of\ndocuments.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In soft durability mode RethinkDB will acknowledge the write immediately after receiving and caching it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `conflict`: Determine handling of inserting documents with the same primary key as existing entries. Possible values are `"error"`, `"replace"` or `"update"`.\n - `"error"`: Do not insert the new document and record the conflict as an error. 
This is the default.\n - `"replace"`: [Replace](http://rethinkdb.com/api/python/replace/) the old document in its entirety with the new one.\n - `"update"`: [Update](http://rethinkdb.com/api/python/update/) fields of the old document with fields from the new one.\n\nInsert returns an object that contains the following attributes:\n\n- `inserted`: the number of documents successfully inserted.\n- `replaced`: the number of documents updated when `conflict` is set to `"replace"` or `"update"`.\n- `unchanged`: the number of documents whose fields are identical to existing documents with the same primary key when `conflict` is set to `"replace"` or `"update"`.\n- `errors`: the number of errors encountered while performing the insert.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `skipped`: 0 for an insert operation.\n- `generated_keys`: a list of generated primary keys for inserted documents whose primary keys were not specified (capped to 100,000).\n- `warnings`: if the field `generated_keys` is truncated, you will get the warning _"Too many generated keys (<X>), array truncated to 100000."_.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `insert` operation. 
Each object will have two keys: `{"new_val": <new value>, "old_val": None}`.\n\n*Example* Insert a document into the table `posts`.\n\n r.table("posts").insert({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }).run(conn)\n\nThe result will be:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\n*Example* Insert a document without a defined primary key into the table `posts` where the\nprimary key is `id`.\n\n r.table("posts").insert({\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }).run(conn)\n\nRethinkDB will generate a primary key and return it in `generated_keys`.\n\n {\n "deleted": 0,\n "errors": 0,\n "generated_keys": [\n "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n ],\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0\n }\n\nRetrieve the document you just inserted with:\n\n r.table("posts").get("dd782b64-70a7-43e4-b65e-dd14ae61d947").run(conn)\n\nAnd you will get back:\n\n {\n "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n "title": "Lorem ipsum",\n "content": "Dolor sit amet",\n }\n\n*Example* Insert multiple documents into the table `users`.\n\n r.table("users").insert([\n {"id": "william", "email": "william@rethinkdb.com"},\n {"id": "lara", "email": "lara@rethinkdb.com"}\n ]).run(conn)\n\n*Example* Insert a document into the table `users`, replacing the document if the document\nalready exists. 
\n\n r.table("users").insert(\n {"id": "william", "email": "william@rethinkdb.com"},\n conflict="replace"\n ).run(conn)\n\n*Example* Copy the documents from `posts` to `posts_backup`.\n\n r.table("posts_backup").insert( r.table("posts") ).run(conn)\n\n*Example* Get back a copy of the inserted document (with its generated primary key).\n\n r.table("posts").insert(\n {"title": "Lorem ipsum", "content": "Dolor sit amet"},\n return_changes=True\n ).run(conn)\n\nThe result will be\n\n {\n "deleted": 0,\n "errors": 0,\n "generated_keys": [\n "dd782b64-70a7-43e4-b65e-dd14ae61d947"\n ],\n "inserted": 1,\n "replaced": 0,\n "skipped": 0,\n "unchanged": 0,\n "changes": [\n {\n "old_val": None,\n "new_val": {\n "id": "dd782b64-70a7-43e4-b65e-dd14ae61d947",\n "title": "Lorem ipsum",\n "content": "Dolor sit amet"\n }\n }\n ]\n }\n'),
(rethinkdb.ast.Table.replace, b'table.replace(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nselection.replace(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nsingleSelection.replace(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\n\nReplace documents in a table. Accepts a JSON document or a ReQL expression,\nand replaces the original document with the new one. The new document must\nhave the same primary key as the original document.\n\nThe `replace` command can be used to both insert and delete documents. If\nthe "replaced" document has a primary key that doesn\'t exist in the table,\nthe document will be inserted; if an existing document is replaced with\n`None`, the document will be deleted. Since `update` and `replace` operations\nare performed atomically, this allows atomic inserts and deletes as well.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override\n the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In\n soft durability mode RethinkDB will acknowledge the write immediately after\n receiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects\n describing the changes made, only including the documents actually\n updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried\n to update whether or not the update was successful. (This was the behavior\n of `True` pre-2.0.)\n- `non_atomic`: if set to `True`, executes the replacement and distributes the\n result to replicas in a non-atomic fashion. 
This flag is required to perform\n non-deterministic updates, such as those that require reading data from\n another table.\n\nReplace returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were replaced.\n- `unchanged`: the number of documents that would have been modified, except\n that the new value was the same as the old value.\n- `inserted`: the number of new documents added. A document is considered inserted if its primary key did not exist in the table at the time of the `replace` operation.\n- `deleted`: the number of deleted documents when doing a replace with `None`.\n- `errors`: the number of errors encountered while performing the replace.\n- `first_error`: If errors were encountered, contains the text of the first\n error.\n- `skipped`: 0 for a replace operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of\n objects, one for each objected affected by the `replace` operation. Each\n object will have two keys: `{"new_val": <new value>, "old_val": <old value>}`.\n\n*Example* Replace the document with the primary key `1`.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "draft"\n }).run(conn)\n\n*Example* Remove the field `status` from all posts.\n\n r.table("posts").replace(lambda post:\n post.without("status")\n ).run(conn)\n\n*Example* Remove all the fields that are not `id`, `title` or `content`.\n\n r.table("posts").replace(lambda post:\n post.pluck("id", "title", "content")\n ).run(conn)\n\n*Example* Replace the document with the primary key `1` using soft durability.\n\n r.table("posts").get(1).replace({\n "id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "draft"\n }, durability="soft").run(conn)\n\n*Example* Replace the document with the primary key `1` and return the values of the document before\nand after the replace operation.\n\n r.table("posts").get(1).replace({\n 
"id": 1,\n "title": "Lorem ipsum",\n "content": "Aleas jacta est",\n "status": "published"\n }, return_changes=True).run(conn)\n\nThe result will have a `changes` field:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": {\n "id":1,\n "title": "Lorem ipsum"\n "content": "Aleas jacta est",\n "status": "published",\n },\n "old_val": {\n "id":1,\n "title": "Lorem ipsum"\n "content": "TODO",\n "status": "draft",\n "author": "William",\n }\n }\n ], \n "replaced": 1,\n "skipped": 0,\n "unchanged": 0\n }\n'),
(rethinkdb.ast.Table.sync, b'table.sync() -> object\n\n`sync` ensures that writes on a given table are written to permanent storage. Queries\nthat specify soft durability (`durability=\'soft\'`) do not give such guarantees, so\n`sync` can be used to ensure the state of these queries. A call to `sync` does not return\nuntil all previous writes to the table are persisted.\n\nIf successful, the operation returns an object: `{"synced": 1}`.\n\n*Example* After having updated multiple heroes with soft durability, we now want to wait\nuntil these changes are persisted.\n\n r.table(\'marvel\').sync().run(conn)\n\n'),
(rethinkdb.ast.Table.update, b'table.update(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nselection.update(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\nsingleSelection.update(object | function[, durability="hard", return_changes=False, non_atomic=False])\n -> object\n\nUpdate JSON documents in a table. Accepts a JSON document, a ReQL expression, or a combination of the two.\n\nThe optional arguments are:\n\n- `durability`: possible values are `hard` and `soft`. This option will override the table or query\'s durability setting (set in [run](http://rethinkdb.com/api/python/run/)). In soft durability mode RethinkDB will acknowledge the write immediately after receiving it, but before the write has been committed to disk.\n- `return_changes`:\n - `True`: return a `changes` array consisting of `old_val`/`new_val` objects describing the changes made, only including the documents actually updated.\n - `False`: do not return a `changes` array (the default).\n - `"always"`: behave as `True`, but include all documents the command tried to update whether or not the update was successful. (This was the behavior of `True` pre-2.0.)\n- `non_atomic`: if set to `True`, executes the update and distributes the result to replicas in a non-atomic fashion. 
This flag is required to perform non-deterministic updates, such as those that require reading data from another table.\n\nUpdate returns an object that contains the following attributes:\n\n- `replaced`: the number of documents that were updated.\n- `unchanged`: the number of documents that would have been modified except the new value was the same as the old value.\n- `skipped`: the number of documents that were skipped because the document didn\'t exist.\n- `errors`: the number of errors encountered while performing the update.\n- `first_error`: If errors were encountered, contains the text of the first error.\n- `deleted` and `inserted`: 0 for an update operation.\n- `changes`: if `return_changes` is set to `True`, this will be an array of objects, one for each objected affected by the `update` operation. Each object will have two keys: `{"new_val": <new value>, "old_val": <old value>}`.\n\n*Example* Update the status of the post with `id` of `1` to `published`.\n\n r.table("posts").get(1).update({"status": "published"}).run(conn)\n\n*Example* Update the status of all posts to `published`.\n\n r.table("posts").update({"status": "published"}).run(conn)\n\n*Example* Update the status of all the posts written by William.\n\n r.table("posts").filter({"author": "William"}).update({"status": "published"}).run(conn)\n\n*Example* Increment the field `view` of the post with `id` of `1`.\nThis query will throw an error if the field `views` doesn\'t exist.\n\n r.table("posts").get(1).update({\n "views": r.row["views"]+1\n }).run(conn)\n\n*Example* Increment the field `view` of the post with `id` of `1`.\nIf the field `views` does not exist, it will be set to `0`.\n\n r.table("posts").get(1).update({\n "views": (r.row["views"]+1).default(0)\n }).run(conn)\n\n*Example* Perform a conditional update. 
\nIf the post has more than 100 views, set the `type` of a post to `hot`, else set it to `normal`.\n\n r.table("posts").get(1).update(lambda post:\n r.branch(\n post["views"] > 100,\n {"type": "hot"},\n {"type": "normal"}\n )\n ).run(conn)\n\n*Example* Update the field `num_comments` with the result of a sub-query. Because this update is not atomic, you must pass the `non_atomic` flag.\n\n r.table("posts").get(1).update({\n "num_comments": r.table("comments").filter({"id_post": 1}).count()\n }, non_atomic=True).run(conn)\n\nIf you forget to specify the `non_atomic` flag, you will get a `ReqlRuntimeError`:\n\nReqlRuntimeError: Could not prove function deterministic. Maybe you want to use the non_atomic flag? \n\n*Example* Update the field `num_comments` with a random value between 0 and 100. This update cannot be proven deterministic because of `r.js` (and in fact is not), so you must pass the `non_atomic` flag.\n\n r.table("posts").get(1).update({\n "num_comments": r.js("Math.floor(Math.random()*100)")\n }, non_atomic=True).run(conn)\n\n*Example* Update the status of the post with `id` of `1` using soft durability.\n\n r.table("posts").get(1).update({status: "published"}, durability="soft").run(conn)\n\n*Example* Increment the field `views` and return the values of the document before and after the update operation.\n\n r.table("posts").get(1).update({\n "views": r.row["views"]+1\n }, return_changes=True).run(conn)\n\nThe result will now include a `changes` field:\n\n {\n "deleted": 0,\n "errors": 0,\n "inserted": 0,\n "changes": [\n {\n "new_val": {\n "id": 1,\n "author": "Julius_Caesar",\n "title": "Commentarii de Bello Gallico",\n "content": "Aleas jacta est",\n "views": 207\n },\n "old_val": {\n "id": 1,\n "author": "Julius_Caesar",\n "title": "Commentarii de Bello Gallico",\n "content": "Aleas jacta est",\n "views": 206\n }\n }\n ],\n "replaced": 1,\n "skipped": 0,\n "unchanged": 0\n }\n\nThe `update` command supports RethinkDB\'s nested field syntax to update 
subdocuments. Consider a user table with contact information in this format:\n\n {\n "id": 10001,\n "name": "Bob Smith",\n "contact": {\n "phone": {\n "work": "408-555-1212",\n "home": "408-555-1213",\n "cell": "408-555-1214"\n },\n "email": {\n "work": "bob@smith.com",\n "home": "bobsmith@example.com",\n "other": "bobbys@moosecall.net"\n },\n "im": {\n "skype": "Bob Smith",\n "aim": "bobmoose",\n "icq": "nobodyremembersicqnumbers"\n }\n },\n "notes": [\n {\n "date": r.time(2014,1,1,\'Z\'),\n "from": "John Doe",\n "subject": "My name is even more boring than Bob\'s"\n },\n {\n "date": r.time(2014,2,2,\'Z\'),\n "from": "Bob Smith Sr",\n "subject": "Happy Second of February"\n }\n ]\n }\n\n*Example* Update Bob Smith\'s cell phone number.\n\n r.table("users").get(10001).update(\n {"contact": {"phone": {"cell": "408-555-4242"}}}\n ).run(conn)\n\n*Example* Add another note to Bob Smith\'s record.\n\n new_note = {\n "date": r.now(),\n "from": "Inigo Montoya",\n "subject": "You killed my father"\n }\n r.table("users").get(10001).update(\n {"notes": r.row["notes"].append(new_note)}\n ).run(conn)\n\n*Example* Send a note to every user with an ICQ number.\n\n icq_note = {\n "date": r.now(),\n "from": "Admin",\n "subject": "Welcome to the future"\n }\n r.table("users").filter(\n r.row.has_fields({"contact": {"im": "icq"}})\n ).update(\n {"notes": r.row["notes"].append(icq_note)}\n ).run(conn)\n\n*Example* Replace all of Bob\'s IM records. Normally, `update` will merge nested documents together; to replace the entire `"im"` document, use the literal command.\n\n r.table(\'users\').get(10001).update(\n {"contact": {"im": r.literal({"aim": "themoosemeister"})}}\n ).run(conn)\n'),
]
# Attach the documentation strings above to the corresponding ReQL functions.
for function, text in docsSource:
    try:
        text = str(text.decode('utf-8'))
    except (UnicodeDecodeError, UnicodeEncodeError):
        # BUGFIX: decode() raises UnicodeDecodeError (Python 3), while the
        # original code only caught UnicodeEncodeError (which str() can raise
        # on Python 2 for non-ASCII text). Catch both so a bad byte sequence
        # keeps the original bytes object as the docstring instead of crashing
        # at import time.
        pass
    if hasattr(function, "__func__"):
        # Bound/unbound methods: the docstring lives on the wrapped function.
        function.__func__.__doc__ = text
    else:
        function.__doc__ = text
from __future__ import print_function

import json
import math
import numbers
import os
import socket
import sys
import time
from optparse import OptionParser

import rethinkdb as r

from ._backup import *
# BUGFIX: the original `info` string opened with a single quote but closed with
# a backtick; use matching backticks.
info = "`_negative_zero_check` finds and lists inaccessible rows with negative zero in their ID"
# BUGFIX: the usage line advertised a non-existent `-d DIR` option; the tool
# only accepts `-f FILE` (see parse_options below).
usage = " _negative_zero_check [-c HOST:PORT] [-a AUTH_KEY] [-f FILE]"
def print_negative_zero_check_help():
    """Print the tool description, usage line, option summary and examples."""
    print(info)
    print(usage)
    print("")
    print(" -h [ --help ] print this help")
    print(" -c [ --connect ] HOST:PORT host and client port of a rethinkdb node to connect")
    print(" to (defaults to localhost:28015)")
    print(" -a [ --auth ] AUTH_KEY authorization key for rethinkdb clients")
    print(" -f [ --file ] FILE file to write rows to (stdout by default)")
    print("")
    print("EXAMPLES:")
    print("_negative_zero_check -c mnemosyne:39500")
    print(" List all matching rows from a cluster running on host 'mnemosyne' with a client port at 39500.")
    print("")
    # BUGFIX: example previously used the non-existent `-d` option.
    print("_negative_zero_check -f negative_zero_matches")
    print(" Export all matching rows on a local cluster into a named file.")
    print("")
    print("_negative_zero_check -c hades -a hunter2")
    print(" List all matching rows from a cluster running on host 'hades' which requires authorization.")
def parse_options():
    """Parse and validate this tool's command line options.

    Returns:
        dict: with keys ``auth_key``, ``host``, ``port`` and ``out_file``
        (an open, writable file object; ``sys.stdout`` when no -f is given).

    Raises:
        RuntimeError: on positional arguments, a malformed --connect value,
            or an already existing output file.
    """
    parser = OptionParser(add_help_option=False, usage=usage)
    parser.add_option("-c", "--connect", dest="host", metavar="HOST:PORT", default="localhost:28015", type="string")
    parser.add_option("-a", "--auth", dest="auth_key", metavar="AUTH_KEY", default="", type="string")
    parser.add_option("-f", "--file", dest="out_file", metavar="FILE", default=None, type="string")
    parser.add_option("-h", "--help", dest="help", default=False, action="store_true")
    (options, args) = parser.parse_args()
    # Check validity of arguments
    if len(args) != 0:
        raise RuntimeError("Error: No positional arguments supported. Unrecognized option '%s'" % args[0])
    if options.help:
        print_negative_zero_check_help()
        exit(0)
    res = {"auth_key": options.auth_key}
    # Verify valid host:port --connect option (parse_connect_option comes from ._backup)
    (res["host"], res["port"]) = parse_connect_option(options.host)
    # Verify valid output file option; refuse to clobber an existing file.
    if options.out_file is None:
        res["out_file"] = sys.stdout
    else:
        if os.path.exists(options.out_file):
            raise RuntimeError("Error: Output file already exists: %s" % options.out_file)
        res["out_file"] = open(options.out_file, "w+")
    return res
def is_negative_zero(x):
    """Return True if ``x`` is the IEEE 754 negative zero float."""
    return x == 0.0 and math.copysign(1.0, x) == -1.0
def key_contains_negative_zero(x):
    """Return True if key value ``x`` (possibly a nested list) contains -0.0."""
    if isinstance(x, list):
        return any(map(key_contains_negative_zero, x))
    elif isinstance(x, numbers.Real):
        return is_negative_zero(x)
    return False
# True if the keys are equal, treating negative zero as unique from positive zero
def key_compare(left, right):
    if isinstance(left, list):
        if not isinstance(right, list):
            return False
        # BUGFIX: map() over two lists truncates to the shorter one, so a key
        # that is a prefix of another (e.g. [1, 2] vs [1, 2, 3]) previously
        # compared as equal. Require equal lengths first.
        if len(left) != len(right):
            return False
        return all(map(key_compare, left, right))
    elif isinstance(left, numbers.Real):
        if not isinstance(right, numbers.Real):
            return False
        return left == right and (is_negative_zero(left) == is_negative_zero(right))
    return left == right
def handle_row(db, table, key, is_duplicate, opts, stats):
    """Record one matching row: bump the per-table counter and emit it as JSON."""
    stats[(db, table)] += 1
    row_json = json.dumps(
        {'db': db, 'table': table, 'key': key, 'is_duplicate': is_duplicate}
    )
    write_key(opts['out_file'], ' ' + row_json)
def write_key(out_file, json):
    """Append one pre-serialized JSON row string to *out_file*.

    Rows are comma-separated inside the surrounding JSON array (written by
    main()); the function attribute ``first_row`` tracks whether a separator
    must precede the next row.

    NOTE(review): the parameter name ``json`` shadows the imported ``json``
    module inside this function (harmless here, but worth renaming).
    """
    if not write_key.first_row:
        out_file.write(',\n')
    out_file.write(json)
    write_key.first_row = False
# Initialize the "static" flag: the very first row must not be preceded by a comma.
write_key.first_row = True
# Process all rows in the cursor until no more are immediately available
# Returns True if the cursor has more results pending, False if the cursor is completed
def process_cursor(task, c, opts, stats):
    """Drain immediately-available primary keys from one table's scan cursor.

    Args:
        task (dict): ``db``, ``table``, ``cursor`` and ``pkey`` for the scan.
        c: Open rethinkdb connection used for the point lookups.
        opts (dict): Parsed options (provides ``out_file``).
        stats (dict): Per-(db, table) match counters, updated in place.

    Returns:
        bool: True if the cursor may still yield results, False when exhausted.
    """
    db = task['db']
    table = task['table']
    cursor = task['cursor']
    pkey = task['pkey']
    try:
        while True:
            # wait=False makes next() raise socket.timeout when no result is
            # buffered yet, which we treat as "come back later".
            key = cursor.next(wait=False)
            if key_contains_negative_zero(key):
                # Check if the same row can be found using its key
                row = r.db(db).table(table).get(key) \
                    .run(c, time_format='raw', binary_format='raw')
                if row is None or not key_compare(key, row[pkey]):
                    # We could not retrieve the same row by its key, which means it was
                    # inserted before version 2.0 and is now inaccessible.
                    handle_row(db, table, key, row is not None, opts, stats)
    except socket.timeout as ex:
        return True
    except r.ReqlCursorEmpty as ex:
        return False
def print_summary(opts, stats):
    """Print a per-table summary of the matching rows to stderr."""
    nonzero = {db_table: n for db_table, n in stats.items() if n > 0}
    total = sum(nonzero.values())
    if not nonzero:
        msg = "In %d tables, found no rows with negative zero in their primary key."
        print(msg % len(stats), file=sys.stderr)
        return
    msg = "In %d of %d tables, found %d rows with negative zero in their primary key."
    print(msg % (len(nonzero), len(stats), total), file=sys.stderr)
    for db_table, count in stats.items():
        if count > 0:
            print(" %s: %d" % ('.'.join(db_table), count), file=sys.stderr)
def main():
    """Scan every table of the cluster for primary keys containing -0.0.

    Opens cursors over all non-system tables, round-robins them until they
    are exhausted, writes matches as a JSON array to the chosen output file,
    and prints a per-table summary to stderr.

    Returns:
        int: 0 on success, 1 on any error (used as the shell exit code).
    """
    try:
        opts = parse_options()
    except Exception as ex:
        print("Usage:\n%s" % usage, file=sys.stderr)
        print(ex, file=sys.stderr)
        return 1
    # Only NUMBER and ARRAY primary keys can contain a float -0.0.
    affected_key_types = r.expr(['NUMBER', 'ARRAY'])
    stats = {}
    tasks = []
    try:
        c = r.connect(opts["host"], opts["port"], auth_key=opts["auth_key"])
        # Make sure the cluster isn't pre-2.0, where positive and negative zero are stored uniquely
        check_minimum_version(None, c, (2, 0, 0))
        for db in r.db_list().set_difference(['rethinkdb']).run(c):
            for table in r.db(db).table_list().run(c):
                stats[(db, table)] = 0
                pkey = r.db(db).table(table).info()['primary_key'].run(c)
                # Only get rows where the primary key is a number or array
                cursor = r.db(db) \
                    .table(table)[pkey] \
                    .filter(lambda x: affected_key_types.contains(x.type_of())) \
                    .run(c, time_format='raw', binary_format='raw')
                tasks.append({'db': db, 'table': table, 'cursor': cursor, 'pkey': pkey})
        opts['out_file'].write('[\n')
        # Round-robin over all open cursors until every one is exhausted;
        # process_cursor returns False once a cursor is done.
        while len(tasks) > 0:
            tasks = [x for x in tasks if process_cursor(x, c, opts, stats)]
            time.sleep(0.1)  # Wait a little for more results so we don't kill CPU
        opts['out_file'].write('\n]\n')
        opts['out_file'].flush()
    except Exception as ex:
        print(ex, file=sys.stderr)
        return 1;
    print_summary(opts, stats)
    return 0
# Script entry point: exit with the status code returned by main().
if __name__ == "__main__":
    exit(main())
import retico_core
import threading
import openai
import time
import os
class ChatGPTDialogueModule(retico_core.AbstractModule):
    """ChatGPT Dialogue Module that uses the OpenAI API to generate responses to user
    input. The ChatGPTDialogueModule is not running locally, but uses the OpenAI API to
    generate responses.
    """

    @staticmethod
    def name():
        return "ChatGPT Dialogue Module"

    @staticmethod
    def description():
        return "ChatGPT Dialogue Module that uses the OpenAI API to generate responses to user input."

    @staticmethod
    def input_ius():
        return [retico_core.text.TextIU]

    @staticmethod
    def output_iu():
        return retico_core.text.TextIU

    def __init__(
        self,
        system_prompt,
        model="gpt-3.5-turbo",
        max_tokens=1000,
        temperature=0.7,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        api_key=None,
        max_history=9,
        **kwargs,
    ):
        """Initializes the ChatGPT Dialogue Module with the given system prompt and gpt
        parameters. The API key can be passed as an argument or set as an environment
        variable. For a more specific description of the GPT parameters, see the OpenAI
        documentation: https://platform.openai.com/docs/api-reference/chat

        Args:
            system_prompt (str): The system prompt that is used to prime the GPT model.
            model (str, optional): The OpenAI model that should be used. Defaults to "gpt-3.5-turbo".
            max_tokens (int, optional): The maximum number of tokens. Defaults to 1000.
            temperature (float, optional): The temperature of the GPT model. Defaults to 0.7.
            top_p (int, optional): Nucleus sampling parameter. Defaults to 1.
            frequency_penalty (int, optional): Penalize tokens based on frequency. Defaults to 0.
            presence_penalty (int, optional): Penalize tokens based on whether they appear in the text. Defaults to 0.
            api_key (str, optional): OpenAI API key. Defaults to None (falls back to the
                OPENAI_API_KEY environment variable).
            max_history (int, optional): The maximum number of previous dialogue turns that
                are used to prime the GPT model. Defaults to 9.
        """
        super().__init__(**kwargs)
        self.system_prompt = system_prompt
        self.model = model
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        self.max_history = max_history
        # List of (user_input, assistant_response) tuples of completed turns.
        self.history = []
        # Flag controlling the background generation thread.
        self.run_loop = False
        # The committed user input waiting to be answered ("" means idle).
        self.current_text = ""
        if api_key is not None:
            self.api_key = api_key
        else:
            self.api_key = os.getenv("OPENAI_API_KEY")

    def setup(self):
        openai.api_key = self.api_key

    def prepare_run(self):
        """Start the background thread that turns committed input into responses."""
        self.run_loop = True
        threading.Thread(target=self._generate_loop).start()

    def shutdown(self):
        """Signal the generation thread to terminate."""
        self.run_loop = False

    def input_text(self):
        """Joins the current input into a single string and returns it.

        Returns:
            str: The current input as a single string.
        """
        return " ".join([iu.text for iu in self.current_input])

    def output_text(self):
        """Joins the current output into a single string and returns it.

        Returns:
            str: The current output as a single string.
        """
        return " ".join([iu.text for iu in self.current_output])

    def process_update(self, update_message):
        """Processes new incoming messages and activates the GPT model to generate a
        response. The response is then added to the history and the response is passed
        to the next module.

        Args:
            update_message (retico_core.UpdateMessage): The incoming UpdateMessage.
        """
        for iu, ut in update_message:
            if ut == retico_core.UpdateType.ADD:
                self.current_input.append(iu)
                self.latest_input_iu = iu
            elif ut == retico_core.UpdateType.REVOKE:
                self.revoke(iu)
            elif ut == retico_core.UpdateType.COMMIT:
                self.commit(iu)
        if self.current_input and self.input_committed():
            # Ignore committed inputs that are too short to be meaningful.
            if len(self.input_text().strip()) < 3:
                self.current_input = []
                return
            current_text = self.input_text()
            self.current_input = []
            # Hand the text over to the background generation thread.
            self.current_text = current_text

    def get_response(self, input):
        """Generates a streaming response to the given input via the OpenAI API.

        If the context length is exceeded, the max_history is reduced by one and the
        function is called again with the same input.

        Args:
            input (str): The input for which a response should be generated.

        Returns:
            The streaming completion iterator, or None if an unrecoverable error
            occurred.
        """
        messages = [
            {"role": "system", "content": self.system_prompt},
        ]
        # BUGFIX: when max_history reaches 0, history[-0:] selects the WHOLE
        # history instead of none of it, so only slice when it is positive.
        if self.max_history > 0:
            for previous_input, previous_response in self.history[-self.max_history :]:
                messages.append({"role": "user", "content": previous_input})
                messages.append({"role": "assistant", "content": previous_response})
        messages.append({"role": "user", "content": input})
        try:
            completion = openai.ChatCompletion.create(
                model=self.model,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                top_p=self.top_p,
                frequency_penalty=self.frequency_penalty,
                presence_penalty=self.presence_penalty,
                stream=True,
            )
            return completion
        except openai.InvalidRequestError as e:
            if e.code == "context_length_exceeded" and self.max_history > 0:
                print(
                    f"Context length exceeded. Reducing max_history to {self.max_history - 1}"
                )
                self.max_history -= 1
                # BUGFIX: the retry previously called get_response() without
                # the input argument, raising a TypeError instead of retrying.
                return self.get_response(input)
            else:
                print("Error: ", e)
        except Exception as e:
            print("Error: ", e)

    def _generate_loop(self):
        """The GPT model is called in a loop to generate responses. The loop is
        terminated once the run_loop flag is set to False.

        Streams the completion chunk by chunk and emits one TextIU per fully
        received word; on finish, commits all output IUs and records the turn
        in the history.
        """
        while self.run_loop:
            if self.current_text != "":
                completion = self.get_response(self.current_text)
                if completion is None:
                    # get_response hit an unrecoverable API error; drop this
                    # input instead of crashing the generation thread by
                    # iterating over None.
                    self.current_text = ""
                    continue
                current_content = ""
                for chunk in completion:
                    choice = chunk.choices[0]
                    if choice["delta"].get("content"):
                        current_content += choice["delta"]["content"]
                        if " " in current_content:
                            split = current_content.split(" ")
                            for i, word in enumerate(split):
                                if i == len(split) - 1:
                                    # Keep the (possibly partial) last word in
                                    # the buffer until more content arrives.
                                    current_content = word
                                    break
                                if word != "":
                                    current_iu = self.create_iu(self.latest_input_iu)
                                    current_iu.text = word
                                    self.current_output.append(current_iu)
                                    um = retico_core.UpdateMessage.from_iu(
                                        current_iu, retico_core.UpdateType.ADD
                                    )
                                    self.append(um)
                    if choice["finish_reason"] == "stop":
                        um = retico_core.UpdateMessage()
                        if current_content != "":
                            current_iu = self.create_iu(self.latest_input_iu)
                            current_iu.text = current_content
                            self.current_output.append(current_iu)
                            um.add_iu(current_iu, retico_core.UpdateType.ADD)
                        for iu in self.current_output:
                            self.commit(iu)
                            um.add_iu(iu, retico_core.UpdateType.COMMIT)
                        self.append(um)
                        self.history.append((self.current_text, self.output_text()))
                        self.current_output = []
                        self.current_text = ""
            time.sleep(0.1)
import threading
import queue
import time
import wave
import platform
import pyaudio
import retico_core
# Module-wide audio constants.
CHANNELS = 1
"""Number of channels. For now, this is hard coded MONO. If there is interest to do
stereo or audio with even more channels, it has to be integrated into the modules."""
TIMEOUT = 0.01
"""Timeout in seconds used for the StreamingSpeakerModule."""
def show_audio_devices():
    """Print every audio output and input device that pyAudio can see."""
    audio = pyaudio.PyAudio()
    host_info = audio.get_host_api_info_by_index(0)
    device_count = host_info["deviceCount"]
    print("Output Devices:")
    for index in range(device_count):
        dev = audio.get_device_info_by_host_api_device_index(0, index)
        if dev["maxOutputChannels"] > 0:
            print(" %s (%d)" % (dev["name"], dev["index"]))
    print("\nInput Devices:")
    for index in range(device_count):
        dev = audio.get_device_info_by_host_api_device_index(0, index)
        if dev["maxInputChannels"] > 0:
            print(" %s (%d)" % (dev["name"], dev["index"]))
class AudioIU(retico_core.IncrementalUnit):
    """An audio incremental unit carrying raw monaural audio from a source.

    Attributes:
        creator (AbstractModule): The module that created this IU
        previous_iu (IncrementalUnit): A link to the IU created before the
            current one.
        grounded_in (IncrementalUnit): A link to the IU this IU is based on.
        created_at (float): The UNIX timestamp of the moment the IU is created.
        raw_audio (bytes[]): The raw audio of this IU
        rate (int): The frame rate of this IU
        nframes (int): The number of frames of this IU
        sample_width (int): The bytes per sample of this IU
    """

    @staticmethod
    def type():
        return "Audio IU"

    def __init__(
        self,
        creator=None,
        iuid=0,
        previous_iu=None,
        grounded_in=None,
        rate=None,
        nframes=None,
        sample_width=None,
        raw_audio=None,
        **kwargs
    ):
        super().__init__(
            creator=creator,
            iuid=iuid,
            previous_iu=previous_iu,
            grounded_in=grounded_in,
            payload=raw_audio,
        )
        # NOTE: extra **kwargs are accepted but not forwarded to super(),
        # matching the original behavior.
        self.raw_audio = raw_audio
        self.rate = rate
        self.nframes = nframes
        self.sample_width = sample_width

    def set_audio(self, raw_audio, nframes, rate, sample_width):
        """Set the audio content (and payload) of the IU."""
        self.payload = self.raw_audio = raw_audio
        self.nframes = int(nframes)
        self.rate = int(rate)
        self.sample_width = int(sample_width)

    def audio_length(self):
        """Return the length of the audio in this IU in seconds.

        Returns:
            float: Length of the audio in this IU in seconds.
        """
        return float(self.nframes) / float(self.rate)
class SpeechIU(AudioIU):
    """A type of audio incremental unit that contains a larger amount of audio
    information and the information if the audio should be dispatched or not.

    This IU can be processed by an AudioDispatcherModule which converts this
    type of IU to AudioIU.
    """

    @staticmethod
    def type():
        return "Speech IU"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # BUGFIX: the flag was initialized as "disptach" (typo), leaving the
        # documented "dispatch" attribute unset.
        self.dispatch = False
        # Backwards-compat alias for any code still reading the misspelled name.
        self.disptach = False
class DispatchedAudioIU(AudioIU):
    """An audio incremental unit emitted by an AudioDispatcherModule.

    Carries the percentual completion of the dispatched audio, which lets a
    dialog manager track the status of the currently dispatched utterance.
    """

    @staticmethod
    def type():
        return "Dispatched Audio IU"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.completion = 0.0
        self.is_dispatching = False

    def set_dispatching(self, completion, is_dispatching):
        """Store the utterance completion degree and the dispatching flag.

        Args:
            completion (float): The degree of completion of the current
                utterance.
            is_dispatching (bool): Whether or not the dispatcher is currently
                dispatching
        """
        self.completion = completion
        self.is_dispatching = is_dispatching
class MicrophoneModule(retico_core.AbstractProducingModule):
    """A module that produces IUs containing audio signals that are captured by
    a microphone via pyaudio's callback API."""

    @staticmethod
    def name():
        return "Microphone Module"

    @staticmethod
    def description():
        # NOTE(review): "prodicing" is a typo in this user-facing string; left
        # unchanged because this is a documentation-only pass.
        return "A prodicing module that records audio from microphone."

    @staticmethod
    def output_iu():
        return AudioIU

    def callback(self, in_data, frame_count, time_info, status):
        """The callback function that gets called by pyaudio.

        Runs on pyaudio's internal thread; it only enqueues the raw bytes so
        process_update can consume them on the module's own thread.

        Args:
            in_data (bytes[]): The raw audio that is coming in from the
                microphone
            frame_count (int): The number of frames that are stored in in_data
        """
        self.audio_buffer.put(in_data)
        return (in_data, pyaudio.paContinue)

    def __init__(self, frame_length=0.02, rate=44100, sample_width=2, **kwargs):
        """
        Initialize the Microphone Module.

        Args:
            frame_length (float): The length of one frame (i.e., IU) in seconds
            rate (int): The frame rate of the recording
            sample_width (int): The width of a single sample of audio in bytes.
        """
        super().__init__(**kwargs)
        self.frame_length = frame_length
        # Number of audio frames per IU at the given rate.
        self.chunk_size = round(rate * frame_length)
        self.rate = rate
        self.sample_width = sample_width
        self._p = pyaudio.PyAudio()
        self.audio_buffer = queue.Queue()
        self.stream = None

    def process_update(self, _):
        """Emit one AudioIU per recorded chunk, blocking up to one second for
        the pyaudio callback to deliver audio; returns None when nothing
        arrived in time."""
        # NOTE(review): a queue.Queue instance is always truthy, so this guard
        # never fires; presumably it was meant to check for None or emptiness.
        if not self.audio_buffer:
            return None
        try:
            sample = self.audio_buffer.get(timeout=1.0)
        except queue.Empty:
            return None
        output_iu = self.create_iu()
        output_iu.set_audio(sample, self.chunk_size, self.rate, self.sample_width)
        return retico_core.UpdateMessage.from_iu(output_iu, retico_core.UpdateType.ADD)

    def setup(self):
        """Set up the microphone for recording (the stream is opened here but
        only started in prepare_run)."""
        p = self._p
        self.stream = p.open(
            format=p.get_format_from_width(self.sample_width),
            channels=CHANNELS,
            rate=self.rate,
            input=True,
            output=False,
            stream_callback=self.callback,
            frames_per_buffer=self.chunk_size,
            start=False,
        )

    def prepare_run(self):
        """Start capturing audio right before the network starts running."""
        if self.stream:
            self.stream.start_stream()

    def shutdown(self):
        """Close the audio stream and drop any buffered, unconsumed audio."""
        self.stream.stop_stream()
        self.stream.close()
        self.stream = None
        self.audio_buffer = queue.Queue()
class SpeakerModule(retico_core.AbstractConsumingModule):
    """A module that consumes AudioIUs of arbitrary size and outputs them to the
    speakers of the machine. When a new IU is incoming, the module blocks as
    long as the current IU is being played."""

    @staticmethod
    def name():
        return "Speaker Module"

    @staticmethod
    def description():
        return "A consuming module that plays audio from speakers."

    @staticmethod
    def input_ius():
        return [AudioIU]

    @staticmethod
    def output_iu():
        return None

    def __init__(
        self,
        rate=44100,
        sample_width=2,
        use_speaker="both",
        device_index=None,
        **kwargs
    ):
        """Initialize the speaker module.

        Args:
            rate (int): Sample rate of the incoming audio. Defaults to 44100.
            sample_width (int): Width of one sample in bytes. Defaults to 2.
            use_speaker (str): Which speaker to use: "left", "right" or
                "both". Only has an effect on macOS (Darwin).
            device_index (int): Index of the pyaudio output device. If None,
                the default output device is used.
        """
        super().__init__(**kwargs)
        self.rate = rate
        self.sample_width = sample_width
        self.use_speaker = use_speaker
        self._p = pyaudio.PyAudio()
        if device_index is None:
            device_index = self._p.get_default_output_device_info()["index"]
        self.device_index = device_index
        self.stream = None
        self.time = None

    def process_update(self, update_message):
        """Play the raw audio of every added IU; stream.write blocks until
        playback of that chunk is finished."""
        for iu, ut in update_message:
            if ut == retico_core.UpdateType.ADD:
                self.stream.write(bytes(iu.raw_audio))
        return None

    def setup(self):
        """Set up the speaker for outputting audio"""
        p = self._p
        # On macOS a channel map can route the audio to a single speaker.
        if platform.system() == "Darwin":
            if self.use_speaker == "left":
                stream_info = pyaudio.PaMacCoreStreamInfo(channel_map=(0, -1))
            elif self.use_speaker == "right":
                stream_info = pyaudio.PaMacCoreStreamInfo(channel_map=(-1, 0))
            else:
                stream_info = pyaudio.PaMacCoreStreamInfo(channel_map=(0, 0))
        else:
            stream_info = None
        self.stream = p.open(
            format=p.get_format_from_width(self.sample_width),
            channels=CHANNELS,
            rate=self.rate,
            input=False,
            output_host_api_specific_stream_info=stream_info,
            output=True,
            output_device_index=self.device_index,
        )

    def shutdown(self):
        """Close the audio stream."""
        self.stream.stop_stream()
        self.stream.close()
        self.stream = None
class StreamingSpeakerModule(retico_core.AbstractConsumingModule):
    """A module that consumes Audio IUs and outputs them to the speaker of the
    machine. The audio output is streamed and thus the Audio IUs have to have
    exactly [chunk_size] samples."""

    @staticmethod
    def name():
        return "Streaming Speaker Module"

    @staticmethod
    def description():
        return "A consuming module that plays audio from speakers."

    @staticmethod
    def input_ius():
        return [AudioIU]

    @staticmethod
    def output_iu():
        return None

    def callback(self, in_data, frame_count, time_info, status):
        """The callback function that gets called by pyaudio.

        Feeds buffered audio to the output stream; when no audio becomes
        available within TIMEOUT seconds, silence is played instead.
        """
        if self.audio_buffer:
            try:
                audio_paket = self.audio_buffer.get(timeout=TIMEOUT)
                return (audio_paket, pyaudio.paContinue)
            except queue.Empty:
                pass
        # No audio available: emit frame_count samples of silence.
        return (b"\0" * frame_count * self.sample_width, pyaudio.paContinue)

    def __init__(self, frame_length=0.02, rate=44100, sample_width=2, **kwargs):
        """Initialize the streaming speaker module.

        Args:
            frame_length (float): The length of one frame (i.e., IU) in seconds.
            rate (int): The frame rate of the audio. Defaults to 44100.
            sample_width (int): The sample width of the audio. Defaults to 2.
        """
        super().__init__(**kwargs)
        self.frame_length = frame_length
        # Number of samples per frame; incoming IUs must match this size.
        self.chunk_size = round(rate * frame_length)
        self.rate = rate
        self.sample_width = sample_width
        self._p = pyaudio.PyAudio()
        self.audio_buffer = queue.Queue()
        self.stream = None

    def process_update(self, update_message):
        """Buffer the raw audio of every added IU for the pyaudio callback."""
        for iu, ut in update_message:
            if ut == retico_core.UpdateType.ADD:
                self.audio_buffer.put(iu.raw_audio)
        return None

    def setup(self):
        """Open the pyaudio output stream that is fed by the callback."""
        p = self._p
        self.stream = p.open(
            format=p.get_format_from_width(self.sample_width),
            channels=CHANNELS,
            rate=self.rate,
            input=False,
            output=True,
            stream_callback=self.callback,
            frames_per_buffer=self.chunk_size,
        )

    def prepare_run(self):
        """Start the audio output stream."""
        self.stream.start_stream()

    def shutdown(self):
        """Close the audio stream and drop any buffered audio."""
        self.stream.stop_stream()
        self.stream.close()
        self.stream = None
        self.audio_buffer = queue.Queue()
class AudioDispatcherModule(retico_core.AbstractModule):
    """An Audio module that takes a raw audio stream of arbitrary size and
    outputs AudioIUs with a specific chunk size at the rate it would be produced
    if the audio was being played.

    This could be especially useful when an agents' TTS module produces an
    utterance, but this utterance should not be transmitted as a whole but in
    an incremental way.

    Attributes:
        target_frame_length (float): The size of each output IU in seconds.
        target_chunk_size (int): The size of each output IU in samples.
        silence (bytes): A bytes array containing [target_chunk_size] samples
            of silence that is dispatched when [continuous] is True and no
            input IU is dispatched.
        continuous (bool): Whether or not the dispatching should be
            continuous. If True, AudioIUs with "silence" will be dispatched
            if no input IUs are being dispatched. If False, no IUs will be
            produced during silence.
        rate (int): The sample rate of the output and the input IU.
        sample_width (int): The sample width of the output and input IU.
        speed (float): The speed of the dispatching. 1.0 means realtime.
        dispatching_mutex (threading.Lock): The mutex if an input IU is
            currently being dispatched.
        audio_buffer (list): The current audio buffer containing the output
            IUs that are currently dispatched.
        run_loop (bool): Whether or not the dispatching loop is running.
        interrupt (bool): Whether or not incoming IUs interrupt the old
            dispatching.
    """

    @staticmethod
    def name():
        return "Audio Dispatching Module"

    @staticmethod
    def description():
        return (
            "A module that transmits audio by splitting it up into" "streamable pakets."
        )

    @staticmethod
    def input_ius():
        return [SpeechIU]

    @staticmethod
    def output_iu():
        return DispatchedAudioIU

    def __init__(
        self,
        target_frame_length=0.02,
        rate=44100,
        sample_width=2,
        speed=1.0,
        continuous=True,
        silence=None,
        interrupt=True,
        **kwargs
    ):
        """Initialize the AudioDispatcherModule with the given arguments.

        Args:
            target_frame_length (float): The length of each output IU in
                seconds.
            rate (int): The sample rate of the output and the input IU.
            sample_width (int): The sample width of the output and input IU.
            speed (float): The speed of the dispatching. 1.0 means realtime.
            continuous (bool): Whether or not the dispatching should be
                continuous. If True, AudioIUs with "silence" will be dispatched
                if no input IUs are being dispatched. If False, no IUs will be
                produced during silence.
            silence (bytes): A bytes array containing target_frame_length
                seconds of silence. If this argument is set to None, a default
                silence of all zeros will be set.
            interrupt (boolean): If this flag is set, a new input IU with audio
                to dispatch will stop the current dispatching process. If set
                to False, the "old" dispatching will be finished before the new
                one is started. If the new input IU has the dispatching flag
                set to False, dispatching will always be stopped.
        """
        super().__init__(**kwargs)
        self.target_frame_length = target_frame_length
        self.target_chunk_size = round(target_frame_length * rate)
        if not silence:
            # Default silence: target_chunk_size zero-valued samples.
            self.silence = b"\0" * self.target_chunk_size * sample_width
        else:
            self.silence = silence
        self.continuous = continuous
        self.rate = rate
        self.sample_width = sample_width
        self._is_dispatching = False
        self.dispatching_mutex = threading.Lock()
        self.audio_buffer = []
        self.run_loop = False
        self.speed = speed
        self.interrupt = interrupt

    def is_dispatching(self):
        """Return whether or not the audio dispatcher is dispatching a Speech
        IU.

        Returns:
            bool: Whether or not speech is currently dispatched
        """
        with self.dispatching_mutex:
            return self._is_dispatching

    def set_dispatching(self, value):
        """Set the dispatching value of this module in a thread safe way.

        Args:
            value (bool): The new value of the dispatching flag.
        """
        with self.dispatching_mutex:
            self._is_dispatching = value

    def process_update(self, update_message):
        """Split added SpeechIUs into chunk-sized DispatchedAudioIUs and queue
        them for the dispatching loop."""
        cur_width = self.target_chunk_size * self.sample_width
        # If the AudioDispatcherModule is set to interrupt mode or if the
        # incoming IU is set to not dispatch, we stop dispatching and clean the
        # buffer
        for iu, ut in update_message:
            if ut != retico_core.UpdateType.ADD:
                continue
            # NOTE(review): SpeechIU.__init__ assigns the misspelled attribute
            # "disptach", so "dispatch" must be set explicitly by the producer
            # of the IU - confirm that upstream modules do this.
            if self.interrupt or not iu.dispatch:
                self.set_dispatching(False)
                self.audio_buffer = []
            if iu.dispatch:
                # Loop over all frames (frame-sized chunks of data) in the input IU
                # and add them to the buffer to be dispatched by the
                # _dispatch_audio_loop
                for i in range(0, iu.nframes, self.target_chunk_size):
                    cur_pos = i * self.sample_width
                    data = iu.raw_audio[cur_pos : cur_pos + cur_width]
                    # Zero-pad the last chunk to the full chunk width.
                    distance = cur_width - len(data)
                    data += b"\0" * distance
                    completion = float((i + self.target_chunk_size) / iu.nframes)
                    if completion > 1:
                        completion = 1
                    current_iu = self.create_iu(iu)
                    current_iu.set_dispatching(completion, True)
                    current_iu.set_audio(
                        data, self.target_chunk_size, self.rate, self.sample_width
                    )
                    self.audio_buffer.append(current_iu)
                self.set_dispatching(True)
        return None

    def _dispatch_audio_loop(self):
        """A method run in a thread that adds IU to the output queue."""
        while self.run_loop:
            with self.dispatching_mutex:
                if self._is_dispatching:
                    if self.audio_buffer:
                        self.append(
                            retico_core.UpdateMessage.from_iu(
                                self.audio_buffer.pop(0), retico_core.UpdateType.ADD
                            )
                        )
                    else:
                        self._is_dispatching = False
                if not self._is_dispatching:  # no else here! bc line above
                    if self.continuous:
                        current_iu = self.create_iu(None)
                        current_iu.set_audio(
                            self.silence,
                            self.target_chunk_size,
                            self.rate,
                            self.sample_width,
                        )
                        current_iu.set_dispatching(0.0, False)
                        self.append(
                            retico_core.UpdateMessage.from_iu(
                                current_iu, retico_core.UpdateType.ADD
                            )
                        )
            # Sleep one (speed-adjusted) frame so that the IUs are emitted at
            # the rate the audio would be played.
            time.sleep((self.target_chunk_size / self.rate) / self.speed)

    def prepare_run(self):
        """Start the background dispatching thread."""
        self.run_loop = True
        t = threading.Thread(target=self._dispatch_audio_loop)
        t.start()

    def shutdown(self):
        """Stop the dispatching loop and clear the audio buffer."""
        self.run_loop = False
        self.audio_buffer = []
class AudioRecorderModule(retico_core.AbstractConsumingModule):
    """A Module that consumes AudioIUs and saves them as a PCM wave file to
    disk."""

    @staticmethod
    def name():
        return "Audio Recorder Module"

    @staticmethod
    def description():
        return "A Module that saves incoming audio to disk."

    @staticmethod
    def input_ius():
        return [AudioIU]

    def __init__(self, filename, rate=44100, sample_width=2, **kwargs):
        """Initialize the audio recorder module.

        Args:
            filename (string): The file name where the audio should be recorded
                to. The path to the file has to be created beforehand.
            rate (int): The sample rate of the input and thus of the wave file.
                Defaults to 44100.
            sample_width (int): The width of one sample. Defaults to 2.
        """
        super().__init__(**kwargs)
        self.filename = filename
        self.wavfile = None
        self.rate = rate
        self.sample_width = sample_width

    def process_update(self, update_message):
        """Append the raw audio of every added IU to the open wave file."""
        for iu, ut in update_message:
            if ut == retico_core.UpdateType.ADD:
                self.wavfile.writeframes(iu.raw_audio)

    def setup(self):
        """Open the wave file and configure rate, channels and sample width."""
        self.wavfile = wave.open(self.filename, "wb")
        self.wavfile.setframerate(self.rate)
        self.wavfile.setnchannels(CHANNELS)
        self.wavfile.setsampwidth(self.sample_width)

    def shutdown(self):
        """Flush and close the wave file."""
        self.wavfile.close()
import queue
import threading
import time
import enum
import copy
class UpdateType(enum.Enum):
    """Enumeration of the update types with which incremental units can be
    transmitted.

    By default, the UpdateMessage class checks that every update type is a
    member of this enum; that strict type checking can be disabled, in which
    case arbitrary update types may be used.
    """

    ADD = "add"
    UPDATE = "update"
    REVOKE = "revoke"
    COMMIT = "commit"
class IncrementalQueue(queue.Queue):
    """A queue carrying update messages from one module to another.

    A module may subscribe to the output of another module. Whenever the
    providing module produces a new incremental unit (IU), the IU is placed
    into a dedicated queue for every subscriber, so each consumer can process
    items at its own pace.

    Attributes:
        provider (AbstractModule): The module that provides IUs for this queue.
        consumer (AbstractModule): The module that consumes IUs from this queue.
        maxsize (int): The maximum size of the queue; 0 means unbounded.
    """

    def __init__(self, provider, consumer, maxsize=0):
        super().__init__(maxsize=maxsize)
        self.provider = provider
        self.consumer = consumer

    def remove(self):
        """Detach this queue from both its provider and its consumer."""
        self.provider.remove_right_buffer(self)
        self.consumer.remove_left_buffer(self)
class IncrementalUnit:
    """An abstract incremental unit.

    The IU may be used for ASR, NLU, DM, TT, TTS, ... It can be redefined to
    fit the needs of the different modules (and module types) but should
    always provide these functionalities.

    The meta_data may be used when an incremental module is having additional
    information because it is working in a simulated environment. This data
    can be used by later modules to keep the simulation going.

    Attributes:
        creator (AbstractModule): The module that created this IU.
        previous_iu (IncrementalUnit): A link to the IU created before the
            current one.
        grounded_in (IncrementalUnit): A link to the IU this IU is based on.
        created_at (float): The UNIX timestamp of the moment the IU is created.
        meta_data (dict): Meta data that offers optional meta information. This
            field can be used to add information that is not available for all
            uses of the specific incremental unit.
    """

    MAX_DEPTH = 50
    """Maximum depth of the previous_iu and grounded_in connections."""

    def __init__(
        self,
        creator=None,
        iuid=None,
        previous_iu=None,
        grounded_in=None,
        payload=None,
        **kwargs,
    ):
        """Initialize an abstract IU. Takes the module that created the IU as an
        argument.

        Args:
            creator (AbstractModule): The module that created this incremental
                unit.
            iuid (int): The id of the IU. This should be a unique ID given by
                the module that produces the incremental unit and is used to
                identify the IU later on - for example when revoking an IU. If
                None, a unique per-object fallback id is used.
            previous_iu (IncrementalUnit): A link to the incremental unit
                created before the current one by the same module.
            grounded_in (IncrementalUnit): A link to the incremental unit that
                this one is based on.
            payload: A generic payload that can be set.
        """
        self.creator = creator
        # Fix: the original fallback used hash(self), which raises TypeError
        # because defining __eq__ without __hash__ makes instances unhashable.
        # id(self) provides a unique per-object fallback id instead.
        self.iuid = iuid if iuid is not None else id(self)
        self.previous_iu = previous_iu
        self.grounded_in = grounded_in
        self._processed_list = []
        self.payload = payload
        self.mutex = threading.Lock()
        self.committed = False
        self.revoked = False
        self.meta_data = {}
        if grounded_in:
            # Inherit a shallow copy of the grounding IU's meta data.
            self.meta_data = {**grounded_in.meta_data}
        self.created_at = time.time()
        self._remove_old_links()

    def _remove_old_links(self):
        """Cut the previous_iu and grounded_in chains at MAX_DEPTH links so
        that long-running pipelines do not keep every old IU alive."""
        current_depth = 0
        previous_iu = self.previous_iu
        while previous_iu:
            if current_depth == self.MAX_DEPTH:
                previous_iu.previous_iu = None
            previous_iu = previous_iu.previous_iu
            current_depth += 1
        current_depth = 0
        grounded_in = self.grounded_in
        while grounded_in:
            if current_depth == self.MAX_DEPTH:
                grounded_in.grounded_in = None
            grounded_in = grounded_in.grounded_in
            current_depth += 1

    def age(self):
        """Return the age of the IU in seconds.

        Returns:
            float: The age of the IU in seconds.
        """
        return time.time() - self.created_at

    def older_than(self, s):
        """Return whether the IU is older than s seconds.

        Args:
            s (float): The time in seconds to check against.

        Returns:
            bool: Whether or not the age of the IU exceeds s seconds.
        """
        return self.age() > s

    def processed_list(self):
        """Return a list of all modules that have already processed this IU.

        The returned list is a copy of the list held by the IU.

        Returns:
            list: A list of all modules that have already processed this IU.
        """
        with self.mutex:
            return list(self._processed_list)

    def set_processed(self, module):
        """Add the module to the list of modules that have already processed
        this IU.

        Args:
            module (AbstractModule): The module that has processed this IU.

        Raises:
            TypeError: If the given object is not an AbstractModule.
        """
        if not isinstance(module, AbstractModule):
            raise TypeError("Given object is not a module!")
        with self.mutex:
            self._processed_list.append(module)

    def is_processed_by(self, module):
        """Return True if the IU is processed by the given module.

        If the given object is a module that has not processed this IU or is
        not a module it returns False.

        Args:
            module (AbstractModule): The module to test whether or not it has
                processed the IU.

        Returns:
            bool: Whether or not the module has processed the IU.
        """
        with self.mutex:
            return module in self._processed_list

    def __repr__(self):
        return "%s - (%s): %s" % (
            self.type(),
            self.creator.name(),
            str(self.payload)[0:10],
        )

    def __eq__(self, other):
        if not isinstance(other, IncrementalUnit):
            return False
        return self.iuid == other.iuid

    def __hash__(self):
        # Fix: keep instances hashable and consistent with __eq__ (equal iuid
        # implies equal hash). Without this, defining __eq__ sets __hash__ to
        # None and hash(iu) raises TypeError.
        return hash(self.iuid)

    @staticmethod
    def type():
        """Return the type of the IU in a human-readable format.

        Returns:
            str: The type of the IU in a human-readable format.
        """
        raise NotImplementedError()
class UpdateMessage:
    """A class that encapsulates multiple incremental units and their update
    type. The update types can be any of the ones defined in the enum
    UpdateType."""

    def __init__(self):
        """Initialize the update message with no IU added.

        To initialize with a single IU use the classmethod "from_iu" or for a
        list of IUs use the classmethod "from_iu_list".
        """
        self._msgs = []  # List of (IncrementalUnit, UpdateType) tuples.
        self._counter = -1  # Iteration cursor used by __next__.

    def __len__(self):
        return len(self._msgs)

    @classmethod
    def from_iu(cls, iu, update_type):
        """Create an update message with an initial pair of incremental unit
        and update type.

        Args:
            iu (IncrementalUnit): The first incremental unit of the update
                message.
            update_type (UpdateType): The update type of the incremental unit.

        Returns:
            UpdateMessage: A new update message containing the given pair.
        """
        um = UpdateMessage()
        um.add_iu(iu, update_type)
        return um

    @classmethod
    def from_iu_list(cls, iu_list):
        """Create an update message from a list of tuples in the format
        (IncrementalUnit, UpdateType).

        Fix: the original signature was (cls, self, iu_list); the spurious
        "self" parameter captured the list, so calling
        UpdateMessage.from_iu_list(ius) always raised a TypeError.

        Args:
            iu_list (list): A list of IncrementalUnit-UpdateType-tuples in the
                format (IncrementalUnit, UpdateType) that will be added to the
                update message.

        Returns:
            UpdateMessage: A new update message containing the given pairs.
        """
        um = UpdateMessage()
        um.add_ius(iu_list)
        return um

    def __iter__(self):
        return self

    def __next__(self):
        # The message is its own (single-pass) iterator; the cursor resets
        # once iteration is exhausted so the message can be re-iterated.
        self._counter += 1
        if self._counter == len(self._msgs):
            self._counter = -1
            raise StopIteration
        return self._msgs[self._counter]

    def add_iu(self, iu, update_type, strict_update_type=True):
        """Add an incremental unit to the update message with the given update
        type.

        Args:
            iu (IncrementalUnit): The incremental unit to be added to the
                update message.
            update_type (UpdateType): The type of the update that should be
                associated with the incremental unit.
            strict_update_type (bool): Whether the update type should be
                checked and converted to type UpdateType.

        Raises:
            TypeError: When the given incremental unit is not of type
                IncrementalUnit.
            ValueError: When the given update type is not a valid update type
                or the given argument cannot be converted to an UpdateType.
                Only applies if the strict_update_type flag is set.
        """
        if not isinstance(iu, IncrementalUnit):
            raise TypeError("IU is of type %s but should be IncrementalUnit" % type(iu))
        if strict_update_type and not isinstance(update_type, UpdateType):
            update_type = UpdateType(update_type)
        self._msgs.append((iu, update_type))

    def add_ius(self, iu_list, strict_update_type=True):
        """Add a list of incremental units and according update types to the
        update message.

        If any single tuple raises a TypeError or a ValueError, none of the
        units will be added to the update message.

        Args:
            iu_list (list): A list containing tuples of incremental units and
                update types in the format (IncrementalUnit, UpdateType).
            strict_update_type (bool): Whether the update type should be
                checked and converted to type UpdateType.

        Raises:
            TypeError: When a given incremental unit is not of type
                IncrementalUnit.
            ValueError: When a given update type is not a valid update type or
                cannot be converted to an UpdateType. Only applies if the
                strict_update_type flag is set.
        """
        # First pass validates all tuples so either all or none are added.
        # Fix: the original unpacked the tuples as (update_type, iu), which
        # contradicts the documented (IncrementalUnit, UpdateType) format, the
        # (iu, update_type) order stored in _msgs, and the "for iu, ut in
        # update_message" iteration used by every consumer.
        for iu, update_type in iu_list:
            if not isinstance(iu, IncrementalUnit):
                raise TypeError(
                    "IU is of type %s but should be IncrementalUnit" % type(iu)
                )
            if strict_update_type and not isinstance(update_type, UpdateType):
                UpdateType(update_type)
        # Second pass appends the validated pairs.
        for iu, update_type in iu_list:
            if strict_update_type and not isinstance(update_type, UpdateType):
                update_type = UpdateType(update_type)
            self._msgs.append((iu, update_type))

    def has_valid_ius(self, iu_classes):
        """Check whether the IUs in this update message are all instances of
        the class(es) provided in the iu_classes argument.

        Args:
            iu_classes (list or class): A list of incremental unit classes or
                a single incremental unit class that should be checked
                against.

        Returns:
            bool: True if all the incremental units in the update message are
            instances of the iu_classes given in the parameter.
        """
        if iu_classes is None:
            return False
        if not isinstance(iu_classes, list):
            iu_classes = [iu_classes]
        for iu in self.incremental_units():
            if not isinstance(iu, tuple(iu_classes)):
                return False
        return True

    def update_types(self):
        """A generator that iterates over all update types of the update
        message, ignoring the incremental units."""
        for _, ut in self._msgs:
            yield ut

    def incremental_units(self):
        """A generator that iterates over all incremental units of the update
        message, ignoring the update types."""
        for iu, _ in self._msgs:
            yield iu

    def set_processed(self, module):
        """Set all incremental units of the update message as processed by the
        given module.

        Args:
            module (IncrementalModule): The module that has processed the
                incremental units of this update message.
        """
        for iu in self.incremental_units():
            iu.set_processed(module)
class AbstractModule:
"""An abstract module that is able to incrementally process data."""
EVENT_PROCESS_IU = "process_iu"
EVENT_PROCESS_UPDATE_MESSAGE = "process_update_message"
EVENT_SUBSCRIBE = "subscribe"
EVENT_START = "start"
EVENT_STOP = "stop"
QUEUE_TIMEOUT = 0.01
"""Timeout in seconds for the incremental queues as not to block processing."""
    @staticmethod
    def name():
        """Return the human-readable name of the module.

        Must be overridden by concrete module subclasses.

        Returns:
            str: A string containing the name of the module
        """
        raise NotImplementedError()
    @staticmethod
    def description():
        """Return the human-readable description of the module.

        Must be overridden by concrete module subclasses.

        Returns:
            str: A string containing the description of the module
        """
        raise NotImplementedError()
    @staticmethod
    def input_ius():
        """Return the list of IU classes that may be processed by this module.

        If an IU is passed to the module that is not in this list or a subclass
        of this list, an error is thrown when trying to process that IU (see
        the type check in _run).

        Returns:
            list: A list of classes that this module is able to process.
        """
        raise NotImplementedError()
    @staticmethod
    def output_iu():
        """Return the class of IU that this module is producing.

        Must be overridden by concrete module subclasses.

        Returns:
            class: The class of IU this module is producing.
        """
        raise NotImplementedError()
def get_init_arguments(self):
"""Returns the arguments of the init function to create the current
instance of the Module.
Returns:
dict: A dictionary containing all the necessary arguments to create
the current instance of the module.
"""
d = {}
valid_types = (int, float, bool, str, dict) # Only serializable types.
for k, v in self.__dict__.items():
if isinstance(v, valid_types):
d[k] = v
return d
def __init__(self, queue_class=IncrementalQueue, meta_data={}, **kwargs):
"""Initialize the module with a default IncrementalQueue.
Args:
queue_class (IncrementalQueue): A queue class that should be used
instead of the standard queue class. If the given object does
not inherit from IncrementalQueue, the standard IncrementalQueue
is used.
meta_data (dict): A dict with meta data about the module. This may
be coordinates of the visualization of this module or other
auxiliary information.
"""
self._right_buffers = []
self._is_running = False
self._previous_iu = None
self._left_buffers = []
self.mutex = threading.Lock()
self.events = {}
self.current_input = []
self.current_output = []
self.meta_data = {}
if meta_data:
self.meta_data = meta_data
self.queue_class = IncrementalQueue
if issubclass(queue_class, IncrementalQueue):
self.queue_class = queue_class
self.iu_counter = 0
def revoke(self, iu, remove_revoked=True):
"""Revokes an IU form the list of the current_input or current_output, depending
on in which list it is found.
Args:
iu (IncrmentalUnit): The incremental unit to revoke.
remove_revoked (bool): Whether the revoked incremental unit should be
deleted from the current_input or current_output list or if only the
revoked flag should be set.
"""
if iu in self.current_input:
self.current_input[self.current_input.index(iu)].revoked = True
if remove_revoked:
self.current_input.remove(iu)
if iu in self.current_output:
self.current_output[self.current_output.index(iu)].revoked = True
if remove_revoked:
self.current_output.remove(iu)
def commit(self, iu):
"""Sets an IU as committed from the list of the current_input or current_output,
depending on where it is found.
Args:
iu (IncrementalUnit): The incremental unit to set as committed.
"""
if iu in self.current_input:
self.current_input[self.current_input.index(iu)].committed = True
if iu in self.current_output:
self.current_output[self.current_output.index(iu)].committed = True
def input_committed(self):
"""Checks whether all IUs in the input are committed.
Returns:
bool: True when all IUs in the current_input is committed, False otherwise.
"""
for ciu in self.current_input:
if not ciu.committed:
return False
return True
def add_left_buffer(self, left_buffer):
"""Add a new left buffer for the module.
This method stops the execution of the module pipeline if it is running.
Args:
left_buffer (IncrementalQueue): The left buffer to add to the
module.
"""
if not left_buffer or not isinstance(left_buffer, IncrementalQueue):
return
if self._is_running:
self.stop()
self._left_buffers.append(left_buffer)
def remove_left_buffer(self, left_buffer):
"""Remove a left buffer from the module.
This method stops the execution of the module pipeline if it is running.
Args:
left_buffer (IncrementalQueue): The left buffer to remove from the
module.
"""
if self._is_running:
self.stop()
self._left_buffers.remove(left_buffer)
def left_buffers(self):
"""Returns the list of left buffers of the module.
Returns:
list: The left buffers of the module.
"""
return list(self._left_buffers)
def add_right_buffer(self, right_buffer):
"""Add a new right buffer for the module.
This method stops the execution of the module pipeline if it is running.
Args:
right_buffer (IncrementalQueue): The right buffer to add to the
module.
"""
if not right_buffer or not isinstance(right_buffer, IncrementalQueue):
return
if self._is_running:
self.stop()
self._right_buffers.append(right_buffer)
def remove_right_buffer(self, right_buffer):
"""Remove a right buffer from the module.
This method stops the execution of the module pipeline if it is running.
Args:
right_buffer (IncrementalQueue): The right buffer to remove from the
module.
"""
if self._is_running:
self.stop()
self._right_buffers.remove(right_buffer)
def right_buffers(self):
"""Return the right buffers of the module.
Note that the returned list is only a shallow copy. Modifying the list
does not alter the internal state of the module (but modifying the
queues in that list does).
Returns:
list: A list of the right buffers, each queue corresponding to an
input of another module.
"""
return list(self._right_buffers)
def append(self, update_message):
"""Append an update message to all queues.
If update_message is None or there are no IUs in the update message, the method
returns without doing anything.
Args:
update_message (UpdateMessage): The update message that should be added to
all output queues. May be None.
"""
if not update_message:
return
if not isinstance(update_message, UpdateMessage):
raise TypeError(
"Update message is of type %s but should be UpdateMessage"
% type(update_message)
)
for q in self._right_buffers:
q.put(copy.copy(update_message))
    def subscribe(self, module, q=None):
        """Subscribe a module to the output of this module.

        It returns a queue where the IUs for that module are placed. The queue
        is not shared with other modules. By default this method creates a new
        queue, but it may use an alternative queue given in parameter 'q'.

        Args:
            module (AbstractModule): The module that wants to subscribe to the
                output of the module.
            q (IncrementalQueue): A optional queue that is used. If q is None,
                the a new queue will be used

        Returns:
            IncrementalQueue: The queue connecting this module to the
            subscriber.
        """
        # NOTE(review): the subscribe event is only fired when a new queue is
        # created here, not when an existing queue is passed in - confirm this
        # asymmetry is intended.
        if not q:
            self.event_call(self.EVENT_SUBSCRIBE, {"module": module})
            q = self.queue_class(self, module)
        module.add_left_buffer(q)
        self._right_buffers.append(q)
        return q
def remove_from_rb(self, module):
"""Removes the connection to a module from the right buffers.
This method removes all queues between this module and the given module
from the right buffer of this module and the left buffer of the given
module.
This method stops the execution of the module.
Args:
module: A module that is subscribed to this module
"""
if self._is_running:
self.stop()
# We get a copy of the buffers because we are mutating it
rbs = self.right_buffers()
for buffer in rbs:
if buffer.consumer == module:
buffer.remove()
def remove_from_lb(self, module):
"""Removes the connection to a module from the left buffers.
This method removes all queues between this module and the given module
from the left buffer of this module and the right buffer of the given
module.
This method stops the execution of the module.
Args:
module: A module that this module is subscribed to
"""
if self._is_running:
self.stop()
# We get a copy of the buffers because we are mutating it
lbs = self.left_buffers()
for buffer in lbs:
if buffer.producer == module:
buffer.remove()
def remove(self):
"""Removes all connections to all modules.
This methods removes all queues from the left buffer and right buffer.
The queues are also removed from the buffers of the connected modules.
This method can be used to remove a module completely from a network.
This method stops the execution of the module.
"""
if self._is_running:
self.stop()
lbs = self.left_buffers()
rbs = self.right_buffers()
for buffer in lbs:
buffer.remove()
for buffer in rbs:
buffer.remove()
def process_update(self, update_message):
    """Process the given update message and return a new update message that
    can be appended to the output queues.

    Note that the incremental units in the returned update message should be
    created by the create_iu method so that they have correct references to
    the previous incremental units generated by this module and to the IUs
    they are based on.
    It is important that this method processes update messages in a timely
    manner (with regard to the production rate of the preceding module) so
    that the incremental queues do not overflow.

    Args:
        update_message (UpdateMessage): The update message that should be
            processed by the module.

    Returns:
        UpdateMessage: An update message that is produced by this module
            based on the incremental units that were given. May be None.
    """
    raise NotImplementedError()
def _run(self):
    """Main worker loop executed in the module's thread.

    Polls every left buffer for update messages, validates their IU types,
    hands them to process_update, fires the per-IU and per-message events,
    and appends any produced output. Runs until stop() clears
    ``_is_running``, then calls shutdown().
    """
    self.prepare_run()
    self._is_running = True
    while self._is_running:
        for buffer in self._left_buffers:
            # The mutex guarantees that only one update message is
            # processed at a time for this module.
            with self.mutex:
                try:
                    update_message = buffer.get(timeout=self.QUEUE_TIMEOUT)
                except queue.Empty:
                    update_message = None
                if update_message:
                    if not update_message.has_valid_ius(self.input_ius()):
                        raise TypeError("This module can't handle this type of IU")
                    output_message = self.process_update(update_message)
                    update_message.set_processed(self)
                    for input_iu in update_message.incremental_units():
                        self.event_call(self.EVENT_PROCESS_IU, {"iu": input_iu})
                    self.event_call(
                        self.EVENT_PROCESS_UPDATE_MESSAGE,
                        {"update_message": update_message},
                    )
                    if output_message:
                        if output_message.has_valid_ius(self.output_iu()):
                            self.append(output_message)
                        else:
                            raise TypeError(
                                "This module should not produce IUs of this type."
                            )
    self.shutdown()
def is_valid_input_iu(self, iu):
    """Check whether *iu* is a valid input IU for this module.

    Validity is defined by the list returned by input_ius: the IU must be
    an instance (or an instance of a subclass) of one of those types.

    Args:
        iu (IncrementalUnit): The IU to be checked.

    Raises:
        TypeError: When the given object is not of type IncrementalUnit.

    Returns:
        bool: Whether the given IU is a valid one for this module.
    """
    if not isinstance(iu, IncrementalUnit):
        raise TypeError("IU is of type %s but should be IncrementalUnit" % type(iu))
    return any(isinstance(iu, accepted_type) for accepted_type in self.input_ius())
def setup(self):
    """This method is called before the module is run. It can be used to
    set up the pipeline needed for processing the IUs.

    However, after setup is called the module may not immediately be run.
    For code that should be executed immediately before a module is run,
    use the `prepare_run` method.
    """
    # Default implementation: nothing to set up.
    pass
def prepare_run(self):
    """A method that is executed just before the module is being run.

    While this method may seem similar to `setup`, it is called immediately
    before the run routine. It may be used by producing modules to
    initialize the generation of output IUs. Unlike `setup`, this method is
    guaranteed to run after the other modules in the network have already
    been set up.
    """
    # Default implementation: nothing to prepare.
    pass
def shutdown(self):
    """This method is called before the module is stopped. It can be used
    to tear down the pipeline needed for processing the IUs."""
    # Default implementation: nothing to tear down.
    pass
def run(self, run_setup=True):
    """Run the processing pipeline of this module in a new thread. The
    thread can be stopped by calling the stop() method.

    Args:
        run_setup (bool): Whether or not the setup method should be
            executed before the thread is started.
    """
    if run_setup:
        self.setup()
    # Drain stale output left over from a previous run so consumers only
    # ever see freshly produced IUs.
    for q in self.right_buffers():
        with q.mutex:
            q.queue.clear()
    t = threading.Thread(target=self._run)
    t.start()
    self.event_call(self.EVENT_START)
def stop(self, clear_buffer=True):
    """Stop the execution of the processing pipeline of this module at the
    next possible point in time. This may be after the next incoming IU is
    processed.

    Args:
        clear_buffer (bool): Whether the right buffers should be drained
            after the module is flagged to stop.
    """
    self._is_running = False
    if clear_buffer:
        for buffer in self.right_buffers():
            while not buffer.empty():
                buffer.get()
    self.event_call(self.EVENT_STOP)
def create_iu(self, grounded_in=None):
    """Create a new Incremental Unit that contains the information of the
    creator (the current module), the previous IU that was created by this
    module, and the IU that it is based on.

    Do not discard (as in not using) any IU that was created by this
    method, because it will already have been introduced into the chain of
    IUs of this module!

    Args:
        grounded_in (IncrementalUnit): The incremental unit that the new
            unit is based on. May be None.

    Returns:
        IncrementalUnit: A new incremental unit with correct pointers to
            the unit it is grounded in and to the previous IU that was
            generated by this module.
    """
    new_iu = self.output_iu()(
        creator=self,
        iuid=f"{hash(self)}:{self.iu_counter}",
        previous_iu=self._previous_iu,
        grounded_in=grounded_in,
    )
    self.iu_counter += 1
    # Remember the new IU so the next create_iu call can chain onto it.
    self._previous_iu = new_iu
    return new_iu
def latest_iu(self):
    """Give read access to the most recent incremental unit produced by
    this module.

    The information obtained through this accessor may be out of date or
    even wrong (for example when a not-yet-initialized IU is returned).
    The returned IU must not be modified in any way, as it may still be
    processed by another module.

    Return:
        (IncrementalUnit): The latest IU that was produced by the module.
    """
    return self._previous_iu
def __repr__(self):
    # Delegate to the human-readable module name for debugging output.
    return self.name()
def event_subscribe(self, event_name, callback):
    """Subscribe a callback to the event with the given name. If the event
    name is "*", the callback will be invoked after every event.

    The callback function receives three arguments: the module that
    triggered the event (AbstractModule), the name of the event (str) and a
    dict that may contain data relevant to the event.

    Args:
        event_name (str): The name of the event to subscribe to.
        callback (function): A function that is called once the event
            occurs.
    """
    # Lazily create the callback list for this event, then register.
    self.events.setdefault(event_name, []).append(callback)
def event_call(self, event_name, data=None):
    """Call all callback functions subscribed to the given event name,
    passing along some optional data. The data is optional but should stay
    consistent across calls of the same event.

    If "*" is passed as the event name, no callback function is called;
    "*" is reserved for subscribing to every event and is not allowed as an
    event name.

    Args:
        event_name (str): The name of the event (not "*").
        data (dict): Optionally some data that is relevant to the event.
    """
    # BUG FIX: default changed from the shared mutable ``{}`` to None; the
    # body already normalized None to {}, so behavior is unchanged while the
    # aliasing hazard of a module-lifetime shared dict is removed.
    if data is None:
        data = {}
    if event_name == "*":
        return
    # Each callback runs in its own thread so a slow handler cannot block
    # the module's processing loop.
    for callback in self.events.get(event_name, []):
        threading.Thread(target=callback, args=(self, event_name, data)).start()
    for callback in self.events.get("*", []):
        threading.Thread(target=callback, args=(self, event_name, data)).start()
class AbstractProducingModule(AbstractModule):
    """An abstract producing module that is able to incrementally process data.

    The producing module has no input queue and thus does not wait for any
    input. The producing module is called continuously and may return new
    output when it becomes available.
    """

    @staticmethod
    def name():
        raise NotImplementedError()

    @staticmethod
    def description():
        raise NotImplementedError()

    @staticmethod
    def input_ius():
        # Producing modules do not consume any IUs.
        return []

    @staticmethod
    def output_iu():
        raise NotImplementedError()

    def __init__(self, queue_class=IncrementalQueue, **kwargs):
        # BUG FIX: the original forwarded the constant ``IncrementalQueue``
        # instead of the ``queue_class`` argument, so any custom queue class
        # passed by the caller was silently ignored.
        super().__init__(queue_class=queue_class, **kwargs)

    def _run(self):
        """Worker loop: repeatedly poll process_update(None) for new output."""
        self.prepare_run()
        self._is_running = True
        while self._is_running:
            with self.mutex:
                output_message = self.process_update(None)
                if output_message:
                    if output_message.has_valid_ius(self.output_iu()):
                        self.append(output_message)
                    else:
                        raise TypeError(
                            "This module should not produce IUs of this type."
                        )
        self.shutdown()

    def process_update(self, update_message):
        raise NotImplementedError()
class AbstractConsumingModule(AbstractModule):
    """An abstract consuming module that is able to incrementally process
    data. The consuming module consumes IUs but does not return any data.
    """

    @staticmethod
    def name():
        raise NotImplementedError()

    @staticmethod
    def description():
        raise NotImplementedError()

    @staticmethod
    def input_ius():
        raise NotImplementedError()

    @staticmethod
    def output_iu():
        # Consuming modules never produce output IUs.
        return None

    def subscribe(self, module, q=None):
        # A consumer has no right buffer to subscribe to.
        raise ValueError("Consuming Modules do not produce any output")

    def process_update(self, update_message):
        raise NotImplementedError()
class AbstractTriggerModule(AbstractProducingModule):
    """An abstract trigger module that produces an update message once its
    trigger method is called. Unless the module is triggered, no updates are
    produced."""

    @staticmethod
    def name():
        raise NotImplementedError()

    @staticmethod
    def description():
        raise NotImplementedError()

    @staticmethod
    def input_ius():
        return []

    @staticmethod
    def output_iu():
        raise NotImplementedError()

    def __init__(self, queue_class=IncrementalQueue, **kwargs):
        # BUG FIX: forward the caller-supplied ``queue_class`` instead of the
        # hard-coded ``IncrementalQueue`` constant that ignored the argument.
        super().__init__(queue_class=queue_class, **kwargs)

    def _run(self):
        """Idle loop: output is produced exclusively via trigger()."""
        self.prepare_run()
        self._is_running = True
        while self._is_running:
            with self.mutex:
                # Sleep briefly so the loop does not spin at full speed.
                time.sleep(0.05)
        self.shutdown()

    def process_update(self, update_message):
        return None

    def trigger(self, data=None, update_type=UpdateType.ADD):
        """The trigger method that should produce an update message and
        append it to the right buffer.

        Args:
            data (dict): A dictionary with data that can be used for the
                trigger. Defaults to an empty dict.
            update_type (UpdateType): The update type that the IU should
                have. Default is UpdateType.ADD.
        """
        # BUG FIX: mutable default argument ``data={}`` replaced with None;
        # subclasses should normalize None to {} themselves.
        raise NotImplementedError()
import retico_core
import time
class DialogueActIU(retico_core.IncrementalUnit):
    """A Dialogue Act Incremental Unit.

    Represents a dialogue act together with concepts and their values. In
    this implementation a single IU expresses exactly one act.

    Attributes:
        act (string): A representation of the current act as a string.
        concepts (dict): Maps concept names onto their actual values.
    """

    @staticmethod
    def type():
        return "Dialogue Act Incremental Unit"

    def __init__(
        self,
        creator=None,
        iuid=0,
        previous_iu=None,
        grounded_in=None,
        payload=None,
        act=None,
        concepts=None,
        **kwargs
    ):
        """Initialize the DialogueActIU with an act and its concepts.

        Args:
            act (string): A representation of the act.
            concepts (dict): A representation of the concepts as a dict.
        """
        super().__init__(
            creator=creator,
            iuid=iuid,
            previous_iu=previous_iu,
            grounded_in=grounded_in,
            payload=payload,
        )
        self.act = act
        self.concepts = concepts if concepts else {}
        self.confidence = 0.0

    def set_act(self, act, concepts=None, confidence=1.0):
        """Set the act and concepts of the IU, overwriting any old values.

        Args:
            act (string): The act of the IU as a string.
            concepts (dict): A dictionary containing the new concepts.
            confidence (float): Confidence of the act prediction.
        """
        self.act = act
        if concepts:
            self.concepts = concepts
        self.confidence = confidence
        self.payload = (act, concepts)
class DispatchableActIU(DialogueActIU):
    """A DialogueActIU carrying a flag that indicates whether the act should
    be dispatched once it has been transformed into speech.

    Attributes:
        dispatch (bool): Whether the speech resulting from this IU should
            be dispatched or not.
    """

    def __init__(self, dispatch=False, **kwargs):
        super().__init__(**kwargs)
        self.dispatch = dispatch
class EndOfTurnIU(retico_core.IncrementalUnit):
    """An incremental unit used for predicting the end of the turn.

    A dialogue management module may use this information to plan upcoming
    turns and enable realistic turn taking.
    """

    @staticmethod
    def type():
        return "End-of-Turn Incremental Unit"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Defaults: no evidence that the turn is ending, interlocutor silent.
        self.probability = 0.0
        self.is_speaking = False

    def set_eot(self, probability=0.0, is_speaking=False):
        """Store the end-of-turn probability and the speaking (VAD) flag.

        Args:
            probability (float): The probability that the turn is ending.
            is_speaking (bool): Whether the interlocutor is speaking.
        """
        self.probability = probability
        self.is_speaking = is_speaking
class DialogueActRecorderModule(retico_core.AbstractConsumingModule):
    """A module that writes dispatched dialogue acts to file."""

    @staticmethod
    def name():
        return "Dialogue Act Recorder Module"

    @staticmethod
    def description():
        return "A module that writes dialogue acts into a file."

    @staticmethod
    def input_ius():
        return [DialogueActIU, DispatchableActIU]

    def __init__(self, filename, separator="\t", **kwargs):
        """Create a recorder writing to *filename*.

        Args:
            filename (str): Path of the output file (truncated on setup).
            separator (str): Column separator; defaults to a tab.
        """
        super().__init__(**kwargs)
        self.filename = filename
        self.separator = separator
        self.txt_file = None  # file handle; opened in setup(), closed in shutdown()

    def setup(self):
        # Opens (and truncates) the output file.
        self.txt_file = open(self.filename, "w")

    def prepare_run(self):
        # Reference point for the relative millisecond timestamps.
        self.start_time = time.time()

    def shutdown(self):
        if self.txt_file:
            self.txt_file.close()
            self.txt_file = None

    def process_update(self, update_message):
        """Append every ADDed dialogue act of *update_message* to the file."""
        for iu, ut in update_message:
            # Only ADD updates are recorded; other update types are skipped.
            if ut != retico_core.UpdateType.ADD:
                continue
            if self.txt_file:
                self.txt_file.write("dialogue_act")
                self.txt_file.write(self.separator)
                # The last token of the creator's repr identifies the module.
                self.txt_file.write(str(iu.creator).split(" ")[-1])
                self.txt_file.write(self.separator)
                # Clamp the reference time so timestamps are never negative.
                if iu.created_at < self.start_time:
                    self.start_time = iu.created_at
                self.txt_file.write(str(int((iu.created_at - self.start_time) * 1000)))
                self.txt_file.write(self.separator)
                self.txt_file.write("-1")
                self.txt_file.write(self.separator)
                if iu.concepts.keys():
                    self.txt_file.write(iu.act + ":" + ",".join(iu.concepts.keys()))
                else:
                    self.txt_file.write(iu.act)
                if isinstance(iu, DispatchableActIU):
                    self.txt_file.write(self.separator)
                    self.txt_file.write(str(iu.dispatch))
                self.txt_file.write("\n")
class DialogueActTriggerModule(retico_core.AbstractTriggerModule):
    """A trigger module that emits a dispatchable dialogue act when its
    trigger method is called."""

    @staticmethod
    def name():
        return "Dialogue Act Trigger Module"

    @staticmethod
    def description():
        return "A trigger module that emits a dialogue act when triggered."

    @staticmethod
    def output_iu():
        return DispatchableActIU

    def __init__(self, dispatch=True, **kwargs):
        super().__init__(**kwargs)
        # BUG FIX: the original assigned the constant True here, silently
        # ignoring the ``dispatch`` argument.
        self.dispatch = dispatch

    def trigger(self, data=None, update_type=retico_core.UpdateType.ADD):
        """Emit a DispatchableActIU built from *data*.

        Args:
            data (dict): Optional trigger data; the keys "act" (default
                "greeting") and "concepts" (default {}) are used.
            update_type (UpdateType): Update type of the emitted IU.
        """
        # BUG FIX: mutable default argument ``data={}`` replaced with None.
        data = data if data is not None else {}
        output_iu = self.create_iu()
        output_iu.dispatch = self.dispatch
        output_iu.set_act(data.get("act", "greeting"), data.get("concepts", {}))
        self.append(retico_core.UpdateMessage.from_iu(output_iu, update_type))
import pickle
def load(filename: str):
    """Load a network from file and return the modules of that network.

    The connections between the modules are re-established according to the
    file content.

    Args:
        filename (str): The path to the .rtc file containing a network.

    Returns:
        (list, list): A list of modules that are connected and ready to be
        run, and a list of (provider, consumer) connections between them.
    """
    # NOTE(security): pickle deserialization executes arbitrary code; only
    # load .rtc files from trusted sources.
    # BUG FIX: the file handle was opened inline and never closed; a context
    # manager guarantees the handle is released.
    with open(filename, "rb") as rtc_file:
        mc_list = pickle.load(rtc_file)
    module_dict = {}
    module_list = []
    connection_list = []
    for m in mc_list[0]:
        mod = m["retico_class"](**m["args"])
        module_dict[m["id"]] = mod
        module_list.append(mod)
    # Connections were saved as (consumer_id, provider_id) pairs.
    for ida, idb in mc_list[1]:
        module_dict[idb].subscribe(module_dict[ida])
        connection_list.append((module_dict[idb], module_dict[ida]))
    return (module_list, connection_list)
def load_and_execute(filename):
    """Load a network from file and run it.

    The network is loaded via the load method. Before running, every module
    is set up. The network runs until some input is given on the console
    (this call blocks on stdin).

    Args:
        filename (str): The path to the .rtc file containing a network.
    """
    module_list, _ = load(filename)
    # Set up all modules before starting any of them so that every module
    # sees a fully prepared network.
    for module in module_list:
        module.setup()
    for module in module_list:
        module.run(run_setup=False)
    # Block until the user presses enter, then shut everything down.
    input()
    for module in module_list:
        module.stop()
def _discover_modules(module):
discovered_lb = []
discovered_rbs = []
lbs = module.left_buffers()
for lb in lbs:
if lb and lb.provider:
discovered_lb.append(lb.provider)
for rb in module.right_buffers():
if rb and rb.consumer:
discovered_rbs.append(rb.consumer)
return set(discovered_lb), set(discovered_rbs)
def run(module):
    """Properly prepare and run a network based on one or more modules.

    The network is automatically discovered, so a single module of the
    network is enough for the whole network to be executed. First the
    `setup` function of each discovered module is called, then every
    module is run.

    Args:
        module (AbstractModule or list): A module of the network or a list
            of multiple modules of the network.
    """
    modules, _ = discover(module)
    for mod in modules:
        mod.setup()
    for mod in modules:
        mod.run(run_setup=False)
def stop(module):
    """Properly stop a network based on one or more modules.

    The network is automatically discovered, so a single module of the
    network is enough for the whole network to be stopped.

    Args:
        module (AbstractModule or list): A module of the network or a list
            of multiple modules of the network.
    """
    modules, _ = discover(module)
    for mod in modules:
        mod.stop()
def discover(module):
    """Discover all modules and connections from a single module or a list
    of modules.

    The network is traversed through all left and right buffers of the
    given modules. If the argument is a single module, the constructed
    network consists only of modules and connections reachable from it; a
    segmented network needs one module per segment.

    The connections are tuples containing the consuming module of the
    connection as the first element and the providing module as the second
    element.

    Args:
        module (AbstractModule or list): A module of the network or a list
            of multiple modules of the network.

    Returns:
        list, list: A list of modules in the first return value and a list
        of connections in the second return value.
    """
    if not isinstance(module, list):
        module = [module]
    undiscovered = set(module)
    # PERF: membership tests against a list were O(n); a set makes each
    # visited-check O(1) without changing the traversal result.
    discovered = set()
    m_list = []
    c_list = []
    while undiscovered:
        current_module = undiscovered.pop()
        discovered.add(current_module)
        producers, consumers = _discover_modules(current_module)
        for mod in producers | consumers:
            if mod not in discovered:
                undiscovered.add(mod)
        m_list.append(current_module)
        for buf in current_module.right_buffers():
            c_list.append((buf.consumer, buf.provider))
    return m_list, c_list
def save(module, filename):
    """Save a network to file given a module or a list of modules.

    The network is automatically detected by traversing all left and right
    buffers of the given modules. If the argument is a single module, the
    saved network consists only of the modules reachable from it; to save a
    network that is split into multiple parts, include at least one module
    of each part in the list.

    Args:
        module (AbstractModule or list): A module of the network or a list
            of multiple modules of the network.
        filename (str): The path to where the network should be stored,
            excluding the file ending .rtc, which is added automatically.
    """
    if not isinstance(module, list):
        module = [module]
    undiscovered = set(module)
    # PERF: visited-checks against a list were O(n); use a set.
    discovered = set()
    m_list = []
    c_list = []
    while undiscovered:
        current_module = undiscovered.pop()
        discovered.add(current_module)
        producers, consumers = _discover_modules(current_module)
        for mod in producers | consumers:
            if mod not in discovered:
                undiscovered.add(mod)
        # Serialize enough information to reconstruct the module in load().
        m_list.append(
            {
                "widget_name": current_module.name(),
                "retico_class": current_module.__class__,
                "args": current_module.get_init_arguments(),
                "id": id(current_module),
                "meta": current_module.meta_data,
            }
        )
        for buf in current_module.right_buffers():
            c_list.append((id(buf.consumer), id(buf.provider)))
    # BUG FIX: the file handle passed inline to pickle.dump was never
    # closed; the context manager also removes a corrupted trailing line of
    # non-Python residue that had been fused onto this statement.
    with open("%s.rtc" % filename, "wb") as rtc_file:
        pickle.dump([m_list, c_list], rtc_file)
import retico_core
def get_text_increment(module, new_text):
    """Compare the full text given by the ASR with the IUs already produced
    (module.current_output) and return only the increment since the last
    update, revoking all previously produced IUs that no longer match.

    For example, if the ``current_output`` of the module consists of:

        [The] [quick] [bright]

    and the ``new_text`` is "The quick brown fox", the function returns an
    UpdateMessage revoking [bright] and the new tokens ["brown", "fox"].

    This function does not create UPDATE messages (e.g., updating [bright]
    to [brown]); it REVOKEs all unfitting IUs instead.

    Args:
        module (IncrementalModule): The incremental module producing TextIUs.
        new_text (str): The full new text that should be incrementalized.

    Returns:
        UpdateMessage, list: An update message containing the revoked IUs
        (with the REVOKE update type), and a list of new token strings for
        which new IUs should be created.
    """
    um = retico_core.UpdateMessage()
    tokens = new_text.strip().split(" ")
    # "".split(" ") yields [""]: nothing to align, nothing to revoke.
    if tokens == [""]:
        return um, []
    new_tokens = []
    iu_idx = 0
    token_idx = 0
    while token_idx < len(tokens):
        if iu_idx >= len(module.current_output):
            # Ran past the previously emitted IUs: everything left is new.
            new_tokens.append(tokens[token_idx])
            token_idx += 1
        else:
            current_iu = module.current_output[iu_idx]
            iu_idx += 1
            if tokens[token_idx] == current_iu.text:
                # Prefix still matches; keep this IU and advance.
                token_idx += 1
            else:
                # Mismatch: revoke this IU. Note the token index does NOT
                # advance, so the token is re-checked against the next IU.
                current_iu.revoked = True
                um.add_iu(current_iu, retico_core.UpdateType.REVOKE)
    # Drop revoked IUs from the module's bookkeeping.
    module.current_output = [iu for iu in module.current_output if not iu.revoked]
    return um, new_tokens
class TextIU(retico_core.IncrementalUnit):
    """An incremental unit whose payload is a plain text string."""

    @staticmethod
    def type():
        return "Text IU"

    def get_text(self):
        """Return the text carried by this IU.

        Returns:
            str: The text contained in the IU.
        """
        return self.payload

    def set_text(self, text):
        """Replace the text carried by this IU.

        Args:
            text (str): The new text of the IU.
        """
        self.payload = text

    # Expose the payload under the conventional ``text`` attribute. Note
    # that this is a data descriptor: assignments to ``self.text`` in
    # subclasses go through set_text and therefore write ``payload``.
    text = property(get_text, set_text)
class GeneratedTextIU(TextIU):
    """A TextIU for generated text, including the information whether the
    text should be dispatched once it has been transformed into speech."""

    @staticmethod
    def type():
        return "Generated Text IU"

    def __init__(self, dispatch=False, **kwargs):
        super().__init__(**kwargs)
        self.dispatch = dispatch
class SpeechRecognitionIU(TextIU):
    """An IU that contains information about recognized speech."""

    @staticmethod
    def type():
        # NOTE(review): the "Recgonition" typo is a runtime string that may
        # be compared against elsewhere; confirm before correcting it.
        return "Speech Recgonition IU"

    def __init__(
        self, creator, iuid=0, previous_iu=None, grounded_in=None, payload=None
    ):
        super().__init__(
            creator,
            iuid=iuid,
            previous_iu=previous_iu,
            grounded_in=grounded_in,
            payload=payload,
        )
        self.predictions = None  # list of (transcript, stability, confidence, final)
        self.stability = None
        self.confidence = None
        self.payload = payload
        # NOTE(review): ``text`` is the property inherited from TextIU whose
        # setter writes ``self.payload`` — this assignment therefore resets
        # the payload set on the line above to None. Confirm this is the
        # intended behavior before relying on ``payload`` here.
        self.text = None
        self.final = False

    def set_asr_results(self, predictions, text, stability, confidence, final):
        """Set the ASR results for this SpeechRecognitionIU.

        Args:
            predictions (list): A list of predictions. This also sets the
                payload. The last prediction in this list should be the
                latest and best prediction.
            text (str): The text of the latest prediction.
            stability (float): The stability of the latest prediction.
            confidence (float): The confidence in the latest prediction.
            final (boolean): Whether the prediction is final.
        """
        self.predictions = predictions
        self.payload = predictions
        # NOTE(review): as in __init__, this goes through the inherited
        # property setter and overwrites ``payload`` with ``text``.
        self.text = text
        self.stability = stability
        self.confidence = confidence
        self.final = final

    def get_text(self):
        # Reads the inherited ``text`` property, i.e. the current payload.
        return self.text
class TextRecorderModule(retico_core.AbstractConsumingModule):
    """A module that writes the received text into a file."""

    @staticmethod
    def name():
        return "Text Recorder Module"

    @staticmethod
    def description():
        return "A module that writes received TextIUs to file"

    @staticmethod
    def input_ius():
        return [TextIU, GeneratedTextIU, SpeechRecognitionIU]

    def __init__(self, filename, separator="\t", **kwargs):
        """Create a recorder writing to *filename*.

        Args:
            filename (str): Path of the output file (truncated on setup).
            separator (str): Column separator; defaults to a tab.
        """
        super().__init__(**kwargs)
        self.filename = filename
        self.separator = separator
        self.txt_file = None  # file handle; opened in setup(), closed in shutdown()

    def setup(self):
        self.txt_file = open(self.filename, "w")

    def shutdown(self):
        if self.txt_file:
            self.txt_file.close()
            self.txt_file = None

    def process_update(self, update_message):
        """Append every ADDed text IU of *update_message* to the file."""
        for iu, ut in update_message:
            # Only ADD updates are recorded.
            if ut != retico_core.UpdateType.ADD:
                continue
            if self.txt_file:
                self.txt_file.write(str(iu.grounded_in.creator))
                self.txt_file.write(self.separator)
                self.txt_file.write(str(iu.created_at))
                self.txt_file.write(self.separator)
                self.txt_file.write(iu.get_text())
                # Subclass-specific extra columns.
                if isinstance(iu, GeneratedTextIU):
                    self.txt_file.write(self.separator)
                    self.txt_file.write(str(iu.dispatch))
                if isinstance(iu, SpeechRecognitionIU):
                    self.txt_file.write(self.separator)
                    self.txt_file.write(str(iu.predictions))
                    self.txt_file.write(self.separator)
                    self.txt_file.write(str(iu.stability))
                    self.txt_file.write(self.separator)
                    self.txt_file.write(str(iu.confidence))
                    self.txt_file.write(self.separator)
                    self.txt_file.write(str(iu.final))
                self.txt_file.write("\n")
class TextTriggerModule(retico_core.AbstractTriggerModule):
    """A trigger module that creates a TextIU once its trigger function is called."""

    @staticmethod
    def name():
        return "Text Trigger Module"

    @staticmethod
    def description():
        return "A trigger module that creates a TextIU once its triggered"

    @staticmethod
    def output_iu():
        return GeneratedTextIU

    def __init__(self, dispatch=True, **kwargs):
        super().__init__(**kwargs)
        self.dispatch = dispatch

    def trigger(self, data=None, update_type=retico_core.UpdateType.ADD):
        """Emit a GeneratedTextIU whose text is taken from *data*.

        Args:
            data (dict): Optional trigger data; the key "text" (default
                "This is a trigger test") is used as the IU payload.
            update_type (UpdateType): Update type of the emitted IU.
        """
        # BUG FIX: mutable default argument ``data={}`` replaced with None.
        data = data if data is not None else {}
        text = data.get("text", "This is a trigger test")
        output_iu = self.create_iu()
        output_iu.payload = text
        output_iu.dispatch = self.dispatch
        self.append(retico_core.UpdateMessage.from_iu(output_iu, update_type))
class TextDispatcherModule(retico_core.AbstractModule):
    """A module that turns SpeechRecognitionIUs or TextIUs into
    GeneratedTextIUs that have the dispatch flag set.
    """

    @staticmethod
    def name():
        return "ASR to TTS Module"

    @staticmethod
    def description():
        return "A module that uses SpeechRecognition IUs and outputs dispatchable IUs"

    @staticmethod
    def input_ius():
        return [SpeechRecognitionIU, TextIU]

    @staticmethod
    def output_iu():
        return GeneratedTextIU

    def __init__(self, dispatch_final=True, **kwargs):
        super().__init__(**kwargs)
        # If True, ASR IUs are only dispatched once their prediction is final.
        self.dispatch_final = dispatch_final

    def process_update(self, update_message):
        """Wrap every incoming IU in a GeneratedTextIU, forwarding the
        original update type."""
        um = retico_core.UpdateMessage()
        for iu, ut in update_message:
            output_iu = self.create_iu(iu)
            output_iu.payload = iu.get_text()
            # Plain TextIUs are always dispatched.
            output_iu.dispatch = True
            if isinstance(iu, SpeechRecognitionIU) and self.dispatch_final:
                # For ASR input, dispatch only once the hypothesis is final.
                output_iu.dispatch = iu.final
                # NOTE(review): the committed flag is assumed to be copied
                # only for ASR IUs — confirm this nesting against upstream.
                output_iu.committed = iu.committed
            um.add_iu(output_iu, ut)
        return um
class IncrementalizeASRModule(retico_core.AbstractModule):
    """A module that takes the output of a non-incremental ASR module, where
    each IU contains the full text of the speech recognition, and produces
    increments based on the difference to the last output.
    """

    @staticmethod
    def name():
        return "Incrementalize ASR Module"

    @staticmethod
    def description():
        return (
            "A module that takes SpeechRecognitionIUs and emits only the "
            + "increments from the previous iu"
        )

    @staticmethod
    def input_ius():
        return [SpeechRecognitionIU]

    @staticmethod
    def output_iu():
        return SpeechRecognitionIU

    def __init__(self, threshold=0.8, **kwargs):
        """
        Args:
            threshold (float): Minimum stability an interim hypothesis must
                reach before increments are emitted.
        """
        super().__init__(**kwargs)
        self.threshold = threshold

    def process_update(self, update_message):
        """Emit only the token-level increments of each incoming full-text
        ASR hypothesis."""
        um = retico_core.UpdateMessage()
        for iu, ut in update_message:
            if ut != retico_core.UpdateType.ADD:
                continue
            # Skip hypotheses that are both unstable and without confidence.
            if iu.stability < self.threshold and iu.confidence == 0.0:
                continue
            current_text = iu.get_text()
            if self.current_input:
                # NOTE(review): get_text_increment aligns against
                # module.current_output while this module tracks
                # self.current_input; also, ``um`` is replaced here, dropping
                # anything accumulated earlier in this loop. Confirm both
                # against upstream before changing.
                um, current_text = retico_core.text.get_text_increment(
                    self, current_text
                )
            if current_text.strip() == "":
                continue
            output_iu = self.create_iu(iu)
            # Just copy the input IU
            output_iu.set_asr_results(
                iu.predictions,
                current_text,
                iu.stability,
                iu.confidence,
                iu.final,
            )
            self.current_input.append(output_iu)
            if output_iu.final:
                # A final hypothesis closes the segment: reset bookkeeping
                # and mark the IU as committed.
                self.current_input = []
                output_iu.committed = True
            um.add_iu(output_iu, retico_core.UpdateType.ADD)
        return um
class EndOfUtteranceModule(retico_core.AbstractModule):
    """A module that looks for the "final" flag of a SpeechRecognitionIU and
    forwards an EndOfTurnIU when the speech recognition detected that the
    utterance is finished.
    """

    @staticmethod
    def name():
        return "End of Utterance Module"

    @staticmethod
    def description():
        return "A module that forwards the end of utterance from the ASR output"

    @staticmethod
    def input_ius():
        return [SpeechRecognitionIU]

    @staticmethod
    def output_iu():
        return retico_core.dialogue.EndOfTurnIU

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def process_update(self, update_message):
        """Emit one EndOfTurnIU (probability 1.0, not speaking) for every
        ADDed IU whose prediction is final."""
        # FIX: a corrupted line of non-Python residue fused onto the end of
        # this class has been removed; guard clauses replace the nesting.
        um = retico_core.UpdateMessage()
        for iu, ut in update_message:
            if ut != retico_core.UpdateType.ADD or not iu.final:
                continue
            outiu = self.create_iu(iu)
            outiu.set_eot(1.0, False)
            um.add_iu(outiu, retico_core.UpdateType.ADD)
        return um
import queue
import threading
import retico_core
from retico_core.text import SpeechRecognitionIU
from retico_core.audio import AudioIU
from google.cloud import speech as gspeech
class GoogleASRModule(retico_core.AbstractModule):
"""A Module that recognizes speech by utilizing the Google Speech API."""
def __init__(
self, language="en-US", threshold=0.8, nchunks=20, rate=44100, **kwargs
):
"""Initialize the GoogleASRModule with the given arguments.
Args:
language (str): The language code the recognizer should use.
threshold (float): The amount of stability needed to forward an update.
nchunks (int): Number of chunks that should trigger a new
prediction.
rate (int): The framerate of the input audio
"""
super().__init__(**kwargs)
self.language = language
self.nchunks = nchunks
self.rate = rate
self.client = None
self.streaming_config = None
self.responses = []
self.threshold = threshold
self.audio_buffer = queue.Queue()
self.latest_input_iu = None
@staticmethod
def name():
return "Google ASR Module"
@staticmethod
def description():
return "A Module that incrementally recognizes speech."
@staticmethod
def input_ius():
return [AudioIU]
@staticmethod
def output_iu():
return SpeechRecognitionIU
def process_update(self, update_message):
for iu, ut in update_message:
if ut != retico_core.UpdateType.ADD:
continue
self.audio_buffer.put(iu.raw_audio)
if not self.latest_input_iu:
self.latest_input_iu = iu
return None
@staticmethod
def _extract_results(response):
predictions = []
text = None
stability = 0.0
confidence = 0.0
final = False
for result in response.results:
if not result or not result.alternatives:
continue
if not text:
final = result.is_final
stability = result.stability
text = result.alternatives[0].transcript
confidence = result.alternatives[0].confidence
predictions.append(
(
result.alternatives[0].transcript,
result.stability,
result.alternatives[0].confidence,
result.is_final,
)
)
return predictions, text, stability, confidence, final
def _generator(self):
while self._is_running:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self.audio_buffer.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self.audio_buffer.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b"".join(data)
    def _produce_predictions_loop(self):
        """Stream buffered audio to Google ASR and emit incremental IUs.

        Runs in a background thread (started by prepare_run). Feeds the audio
        chunks from _generator() into the streaming recognizer and turns each
        response into ADD updates, one output IU per newly recognized token.
        """
        requests = (
            gspeech.StreamingRecognizeRequest(audio_content=content)
            for content in self._generator()
        )
        self.responses = self.client.streaming_recognize(
            self.streaming_config, requests
        )
        for response in self.responses:
            # p: all predictions, t: best transcript, s: stability,
            # c: confidence, f: whether the result is final.
            p, t, s, c, f = self._extract_results(response)
            if p:
                um = retico_core.UpdateMessage()
                # Skip unstable zero-confidence interim results.
                if s < self.threshold and c == 0.0 and not f:
                    continue
                current_text = t
                um, new_tokens = retico_core.text.get_text_increment(self, current_text)
                if len(new_tokens) == 0:
                    if not f:
                        continue
                    else:
                        # Final result without new tokens: emit one committed
                        # (empty-text) IU to signal the end of the utterance.
                        output_iu = self.create_iu(self.latest_input_iu)
                        output_iu.set_asr_results(p, "", s, c, f)
                        output_iu.committed = True
                        self.current_output = []
                        um.add_iu(output_iu, retico_core.UpdateType.ADD)
                for i, token in enumerate(new_tokens):
                    output_iu = self.create_iu(self.latest_input_iu)
                    # eou: end of utterance -- last token of a final result.
                    eou = f and i == len(new_tokens) - 1
                    output_iu.set_asr_results(p, token, 0.0, 0.99, eou)
                    if eou:
                        output_iu.committed = True
                        self.current_output = []
                    else:
                        self.current_output.append(output_iu)
                    um.add_iu(output_iu, retico_core.UpdateType.ADD)
                self.latest_input_iu = None
                self.append(um)
def setup(self):
self.client = gspeech.SpeechClient()
config = gspeech.RecognitionConfig(
encoding=gspeech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=self.rate,
language_code=self.language,
)
self.streaming_config = gspeech.StreamingRecognitionConfig(
config=config, interim_results=True
)
def prepare_run(self):
t = threading.Thread(target=self._produce_predictions_loop)
t.start()
    def shutdown(self):
        """Stop the recognizer by unblocking the audio generator.

        The None sentinel makes _generator (and thereby the prediction loop)
        terminate.
        """
        self.audio_buffer.put(None)
import http.client
import json
import os
import subprocess
import base64
import random
import wave
from hashlib import blake2b
import time
import threading
import retico_core
# Helper functions ==============
def get_gcloud_token():
    """Return the gcloud access token as a string.

    This function requires the gcloud executable to be available in the path
    variable.

    Return (str): The gcloud access token
    """
    raw_output = subprocess.check_output(
        ["gcloud", "auth", "application-default", "print-access-token"]
    )
    return raw_output.decode("utf-8").strip()
# =================
class GoogleTTS:
    """
    A google TTS class that is able to return the audio as pcm.
    This class relies on gcloud and ffmpeg to be installed and available.
    """

    # os.path/os.makedirs do NOT expand "~", so expand it explicitly here;
    # otherwise a literal "~" directory is created in the working directory.
    CACHING_DIR = os.path.expanduser("~/.cache/gtts_cache/")
    TEMP_DIR = os.path.expanduser("~/.cache/tmp_tts")
    TEMP_NAME = "tmp_tts_%s" % random.randint(1000, 10000)

    def __init__(
        self,
        language_code="en-US",
        voice_name="en-US-Wavenet-A",
        speaking_rate=1.4,
        caching=True,
    ):
        """
        Creates a Google TTS instance with the specified language_code and voice_name.
        The valid values can be looked up [here](https://cloud.google.com/text-to-speech/docs/voices).
        Args:
            language_code (str): The language code specified by google cloud (e.g. en-US or de-DE)
            voice_name (str): The name of the voice specified by google cloud
            speaking_rate (float): The speaking rate of the synthesized speech.
            caching (bool): Whether the tts should cache the speech.
        """
        self.language_code = language_code
        self.voice_name = voice_name
        self.ssml_gender = "FEMALE"
        self.caching = caching
        self._gcloud_token = None
        self.speaking_rate = speaking_rate
        self.wav_sample_rate = 44100  # 44100 sample rate / See ffmpeg
        self.wav_codec = "pcm_s16le"  # 16-bit little endian codec / See ffmpeg
        # Create both working directories if they do not already exist.
        # TEMP_DIR is required by convert_audio(), which writes the
        # intermediate mp3/wav files there.
        os.makedirs(self.CACHING_DIR, exist_ok=True)
        os.makedirs(self.TEMP_DIR, exist_ok=True)

    def gcloud_token(self, use_cache=True):
        """Return the gcloud token.

        The gcloud token is cached, so it is only retrieved once for every instance
        of the GoogleTTS class.

        Args:
            use_cache (bool): Whether the method should use the cache or if it should
                retrieve the token from the gcloud application.
        Returns (str): The gcloud access token.
        """
        if not use_cache or self._gcloud_token is None:
            self._gcloud_token = get_gcloud_token()
        return self._gcloud_token

    def get_cache_path(self, text):
        """
        Creates a hash of the given TTS settings and returns a unique path to the
        cached version of the synthesis.
        This method does not check for the cached file to exist!
        Args:
            text (str): The text to synthesize (this is included in the hash that is
                used for the cache path)
        Returns (str): Path to a cached version of that synthesis.
        """
        # Every setting that influences the audio is part of the hash, so a
        # changed voice/rate/codec never hits a stale cache entry.
        h = blake2b(digest_size=16)
        h.update(bytes(text, "utf-8"))
        h.update(bytes(self.voice_name, "utf-8"))
        h.update(bytes(self.language_code, "utf-8"))
        h.update(bytes(self.wav_codec, "utf-8"))
        h.update(bytes(str(self.wav_sample_rate), "utf-8"))
        h.update(bytes(str(self.speaking_rate), "utf-8"))
        text_digest = h.hexdigest()
        return os.path.join(self.CACHING_DIR, text_digest)

    def tts(self, text):
        """
        Synthesizes the text given and returns it in PCM format. This method uses
        the wav_sample_rate and wav_codec properties to determine the shape of the
        synthesized audio.
        The returned audio does not have any wave header but contains just the pure
        PCM data.
        Args:
            text (str): The text to synthesize
        Returns (bytes): The synthesized text in raw PCM format.
        """
        cache_path = self.get_cache_path(text)
        # Honor the caching flag: only read from or write to the cache when
        # caching is enabled (previously the flag was stored but ignored).
        if self.caching and os.path.isfile(cache_path):
            with open(cache_path, "rb") as cfile:
                wav_audio = cfile.read()
        else:
            mp3_audio = self.google_tts_call(text)
            wav_audio = self.convert_audio(mp3_audio)
            if self.caching:
                with open(cache_path, "wb") as cfile:
                    cfile.write(wav_audio)
        return wav_audio

    def google_tts_call(self, text):
        """
        This method does a Google TTS call and returns the response (audio data in
        MP3 format) as bytes.
        Args:
            text (str): The string to be synthesized
        Returns (bytes): Audio data in MP3 format as bytes.
        """
        request_data = {
            "input": {"text": text},
            "voice": {
                "languageCode": self.language_code,
                "name": self.voice_name,
                "ssmlGender": self.ssml_gender,
            },
            "audioConfig": {
                "speakingRate": self.speaking_rate,
                "audioEncoding": "MP3",
            },  # We always use MP3 audio encoding, because it is fast to download.
        }  # We convert that later on to the format we want
        json_data = json.dumps(request_data)
        # XXX: This API is in beta and might change
        h1 = http.client.HTTPSConnection("texttospeech.googleapis.com")
        h1.request(
            "POST",
            "/v1beta1/text:synthesize",
            headers={
                "Authorization": "Bearer %s" % self.gcloud_token(),
                "Content-Type": "application/json; charset=utf-8",
            },
            body=json_data,
        )
        r1 = h1.getresponse()
        response = r1.read()
        base64_response = json.loads(response)
        audio_data = base64.b64decode(base64_response["audioContent"])
        return audio_data

    def convert_audio(self, audio):
        """
        Converts the given mp3 audio to the respective pcm data through ffmpeg.
        This function assumes ffmpeg is installed and readily available.
        Args:
            audio (bytes): The mp3 audio data as given by Google TTS
        Returns (bytes): The pcm data as specified by wav_codec and wav_sample_rate.
            Note that this byte array does not contain the wave header (or any other
            header) but is just the raw audio data.
        """
        tmp_mp3_name = self.TEMP_NAME + ".mp3"
        tmp_wav_name = self.TEMP_NAME + ".wav"
        tmp_mp3_path = os.path.join(self.TEMP_DIR, tmp_mp3_name)
        tmp_wav_path = os.path.join(self.TEMP_DIR, tmp_wav_name)
        with open(tmp_mp3_path, "wb") as f:
            f.write(audio)
        subprocess.call(
            [
                "ffmpeg",
                "-i",
                tmp_mp3_path,
                "-acodec",
                self.wav_codec,
                "-ar",
                str(self.wav_sample_rate),
                tmp_wav_path,
                "-y",
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        with wave.open(tmp_wav_path, "rb") as wav_file:
            w_length = wav_file.getnframes()
            wav_audio = wav_file.readframes(w_length)
        # Cleanup of the temporary files.
        os.remove(tmp_mp3_path)
        os.remove(tmp_wav_path)
        return wav_audio
class GoogleTTSModule(retico_core.AbstractModule):
    """A Google TTS Module that uses Googles TTS service to synthesize audio."""

    @staticmethod
    def name():
        """Return the human-readable name of this module."""
        return "Google TTS Module"

    @staticmethod
    def description():
        """Return a short description of what this module does."""
        return "A module that uses Google TTS to synthesize audio."

    @staticmethod
    def input_ius():
        """Return the list of IU types this module accepts as input."""
        return [retico_core.text.TextIU]

    @staticmethod
    def output_iu():
        """Return the IU type this module produces."""
        return retico_core.audio.SpeechIU

    def __init__(
        self,
        language_code,
        voice_name,
        speaking_rate=1.4,
        caching=True,
        frame_duration=0.05,
        samplerate=44100,
        **kwargs,
    ):
        """Initialize the Google TTS module.

        Args:
            language_code (str): The language code used by Google TTS (e.g. en-US).
            voice_name (str): The name of the Google TTS voice.
            speaking_rate (float): The speaking rate of the synthesized speech.
            caching (bool): Whether synthesized audio should be cached on disk.
            frame_duration (float): Duration (seconds) of each emitted audio frame.
            samplerate (int): The sample rate of the emitted audio.
        """
        super().__init__(**kwargs)
        self.language_code = language_code
        self.voice_name = voice_name
        self.speaking_rate = speaking_rate
        self.caching = caching
        self.gtts = GoogleTTS(language_code, voice_name, speaking_rate, caching)
        self.samplewidth = 2  # bytes per sample (16-bit PCM)
        self.samplerate = samplerate
        self.frame_duration = frame_duration
        self._latest_text = ""
        self.latest_input_iu = None
        self.audio_buffer = []
        self._tts_thread_active = False
        self.audio_pointer = 0
        self.clear_after_finish = False

    def setup(self):
        # We create the token on setup so that the first synthesis will not take long.
        self.gtts.gcloud_token(use_cache=False)

    def get_text(self):
        """Return the text of all IUs in the current input, joined by spaces."""
        return " ".join([iu.get_text() for iu in self.current_input])

    def process_update(self, update_message):
        """Collect input text and (re)synthesize audio into the frame buffer."""
        if not update_message:
            return None
        for iu, ut in update_message:
            if ut == retico_core.UpdateType.ADD:
                self.current_input.append(iu)
                self.latest_input_iu = iu
            elif ut == retico_core.UpdateType.REVOKE:
                self.revoke(iu)
            elif ut == retico_core.UpdateType.COMMIT:
                self.commit(iu)
        current_text = self.get_text()
        # Only synthesize when the input was committed or the text grew by
        # more than 40 characters since the last synthesis.
        if self.input_committed() or len(current_text) - len(self._latest_text) > 40:
            self._latest_text = current_text
            chunk_size = int(self.samplerate * self.frame_duration)
            chunk_size_bytes = chunk_size * self.samplewidth
            new_audio = self.gtts.tts(current_text)
            i = 0
            new_buffer = []
            # Slice the synthesized audio into frame-sized chunks, padding
            # the last chunk with silence (zero bytes).
            while i < len(new_audio):
                chunk = new_audio[i : i + chunk_size_bytes]
                if len(chunk) < chunk_size_bytes:
                    chunk = chunk + b"\x00" * (chunk_size_bytes - len(chunk))
                new_buffer.append(chunk)
                i += chunk_size_bytes
            if self.clear_after_finish:
                self.audio_buffer.extend(new_buffer)
            else:
                self.audio_buffer = new_buffer
            if self.input_committed():
                self.clear_after_finish = True
                self.current_input = []
        return None

    def _tts_thread(self):
        """Emit one audio frame per frame_duration; silence when the buffer is empty.

        Runs in a background thread started by prepare_run.
        """
        t1 = time.time()
        while self._tts_thread_active:
            # Sleep so that frames are emitted roughly every frame_duration
            # seconds, compensating for the time spent in the loop body.
            t2 = t1
            t1 = time.time()
            if t1 - t2 < self.frame_duration:
                time.sleep(self.frame_duration)
            else:
                time.sleep(max((2 * self.frame_duration) - (t1 - t2), 0))
            if self.audio_pointer >= len(self.audio_buffer):
                # Buffer exhausted: emit one frame of silence.
                raw_audio = (
                    b"\x00"
                    * self.samplewidth
                    * int(self.samplerate * self.frame_duration)
                )
                if self.clear_after_finish:
                    self.audio_pointer = 0
                    self.audio_buffer = []
                    self.clear_after_finish = False
            else:
                raw_audio = self.audio_buffer[self.audio_pointer]
                self.audio_pointer += 1
            iu = self.create_iu(self.latest_input_iu)
            iu.set_audio(raw_audio, 1, self.samplerate, self.samplewidth)
            um = retico_core.UpdateMessage.from_iu(iu, retico_core.UpdateType.ADD)
            self.append(um)

    def prepare_run(self):
        """Reset playback state and start the frame-emitting thread."""
        self.audio_pointer = 0
        self.audio_buffer = []
        self._tts_thread_active = True
        self.clear_after_finish = False
        self._latest_text = ""
        threading.Thread(target=self._tts_thread).start()

    def shutdown(self):
        """Stop the frame-emitting thread."""
        self._tts_thread_active = False
import transformers

# Silence the (very verbose) transformers logging before the pipeline is used.
transformers.logging.set_verbosity_error()
from transformers import pipeline
import retico_core
class HFTranslate:
    """Wraps a Hugging Face translation pipeline for one fixed language pair."""

    # Supported language pairs, mapped to the Helsinki-NLP model that
    # translates between them.
    TRANSLATION_MAP = {
        "en_fr": "Helsinki-NLP/opus-mt-en-fr",
        "fr_en": "Helsinki-NLP/opus-mt-fr-en",
        "en_de": "Helsinki-NLP/opus-mt-en-de",
        "de_en": "Helsinki-NLP/opus-mt-de-en",
        "es_en": "Helsinki-NLP/opus-mt-es-en",
        "en_es": "Helsinki-NLP/opus-mt-en-es",
        "fr_de": "Helsinki-NLP/opus-mt-fr-de",
        "de_fr": "Helsinki-NLP/opus-mt-de-fr",
    }

    def __init__(self, from_lang="en", to_lang="de"):
        """Create a translator for the given language pair.

        Raises:
            ValueError: If no model is available for the pair.
        """
        self.from_lang = from_lang
        self.to_lang = to_lang
        pair_key = "%s_%s" % (from_lang, to_lang)
        model_name = self.TRANSLATION_MAP.get(pair_key)
        if not model_name:
            raise ValueError(f"Cannot translate from {from_lang} to {to_lang}.")
        self.translator = pipeline("translation", model=model_name)

    def translate(self, text):
        """Return the translation of the given text as a string."""
        return self.translator(text)[0]["translation_text"]
class HFTranslateModule(retico_core.AbstractModule):
    """An incremental module that translates text using Hugging Face models."""

    @staticmethod
    def name():
        """Return the human-readable name of this module."""
        return "Hugging Face Translation Module"

    @staticmethod
    def description():
        """Return a short description of what this module does."""
        return (
            "A module that translates between languages using Hugging Face Transfomers."
        )

    @staticmethod
    def input_ius():
        """Return the list of IU types this module accepts as input."""
        return [retico_core.text.TextIU]

    @staticmethod
    def output_iu():
        """Return the IU type this module produces."""
        return retico_core.text.TextIU

    def __init__(self, from_lang="en", to_lang="de", **kwargs):
        """Initialize the translation module for the given language pair.

        Args:
            from_lang (str): Source language code (e.g. "en").
            to_lang (str): Target language code (e.g. "de").
        """
        super().__init__(**kwargs)
        self.from_lang = from_lang
        self.to_lang = to_lang
        self.hftranslator = None
        self._latest_text = ""
        self._latest_translation = ""
        self.latest_input_iu = None

    def setup(self):
        # The pipeline is loaded here (not in __init__) so that model loading
        # happens when the network is set up.
        self.hftranslator = HFTranslate(self.from_lang, self.to_lang)

    def shutdown(self):
        """Release the translator."""
        self.hftranslator = None

    def current_text(self):
        """Return the text of all IUs in the current input, joined by spaces."""
        return " ".join([iu.text for iu in self.current_input])

    def process_update(self, update_message):
        """Translate the current input text and emit the new output tokens."""
        for iu, ut in update_message:
            if ut == retico_core.UpdateType.ADD:
                self.current_input.append(iu)
                self.latest_input_iu = iu
            elif ut == retico_core.UpdateType.REVOKE:
                self.revoke(iu)
            elif ut == retico_core.UpdateType.COMMIT:
                self.commit(iu)
        current_text = self.current_text()
        current_translation = self.hftranslator.translate(current_text)
        # Only the tokens that are new compared to the previous output are
        # added to the update message.
        um, new_tokens = retico_core.text.get_text_increment(self, current_translation)
        for token in new_tokens:
            output_iu = self.create_iu(self.latest_input_iu)
            output_iu.text = token
            self.current_output.append(output_iu)
            um.add_iu(output_iu, retico_core.UpdateType.ADD)
        if self.input_committed():
            # Commit all produced output IUs and reset the buffers.
            for iu in self.current_output:
                self.commit(iu)
                um.add_iu(iu, retico_core.UpdateType.COMMIT)
            self.current_input = []
            self.current_output = []
        return um
# RetinaFace
<div align="center">
[](https://pepy.tech/project/retina-face)
[](https://anaconda.org/conda-forge/retina-face)
[](https://github.com/serengil/retinaface)
[](https://github.com/serengil/retinaface/blob/master/LICENSE)
[](https://www.patreon.com/serengil?repo=retinaface)
[](https://twitter.com/serengil)
[](https://doi.org/10.1109/ICEET53442.2021.9659697)
</div>
RetinaFace is a deep learning based cutting-edge facial detector for Python coming with facial landmarks. Its detection performance is amazing even in the crowd as shown in the following illustration.
RetinaFace is the face detection module of [insightface](https://github.com/deepinsight/insightface) project. The original implementation is mainly based on mxnet. Then, its tensorflow based [re-implementation](https://github.com/StanislasBertrand/RetinaFace-tf2) is published by [Stanislas Bertrand](https://github.com/StanislasBertrand). So, this repo is heavily inspired from the study of Stanislas Bertrand. Its source code is simplified and it is transformed to pip compatible but the main structure of the reference model and its pre-trained weights are same.
<p align="center"><img src="https://raw.githubusercontent.com/serengil/retinaface/master/tests/outputs/img3.jpg" width="90%" height="90%">
<br><em>Fenerbahce Women Volleyball Team</em>
</p>
## Installation [](https://pypi.org/project/retina-face/) [](https://anaconda.org/conda-forge/retina-face)
The easiest way to install retinaface is to download it from [PyPI](https://pypi.org/project/retina-face/). It's going to install the library itself and its prerequisites as well.
```shell
$ pip install retina-face
```
RetinaFace is also available at [`Conda`](https://anaconda.org/conda-forge/retina-face). You can alternatively install the package via conda.
```shell
$ conda install -c conda-forge retina-face
```
Then, you will be able to import the library and use its functionalities.
```python
from retinaface import RetinaFace
```
**Face Detection** - [`Demo`](https://youtu.be/Wm1DucuQk70)
RetinaFace offers a face detection function. It expects an exact path of an image as input.
```python
resp = RetinaFace.detect_faces("img1.jpg")
```
Then, it will return the facial area coordinates and some landmarks (eyes, nose and mouth) with a confidence score.
```json
{
"face_1": {
"score": 0.9993440508842468,
"facial_area": [155, 81, 434, 443],
"landmarks": {
"right_eye": [257.82974, 209.64787],
"left_eye": [374.93427, 251.78687],
"nose": [303.4773, 299.91144],
"mouth_right": [228.37329, 338.73193],
"mouth_left": [320.21982, 374.58798]
}
}
}
```
**Alignment** - [`Tutorial`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`Demo`](https://youtu.be/WA9i68g4meI)
A modern face recognition [pipeline](https://sefiks.com/2020/05/01/a-gentle-introduction-to-face-recognition-in-deep-learning/) consists of 4 common stages: detect, [align](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [normalize](https://sefiks.com/2020/11/20/facial-landmarks-for-face-recognition-with-dlib/), [represent](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/) and [verify](https://sefiks.com/2020/05/22/fine-tuning-the-threshold-in-face-recognition/). Experiments show that alignment increases the face recognition accuracy almost 1%. Here, retinaface can find the facial landmarks including eye coordinates. In this way, it can apply alignment to detected faces with its extracting faces function.
```python
import matplotlib.pyplot as plt
faces = RetinaFace.extract_faces(img_path = "img.jpg", align = True)
for face in faces:
plt.imshow(face)
plt.show()
```
<p align="center"><img src="https://raw.githubusercontent.com/serengil/retinaface/master/tests/outputs/alignment-procedure.png" width="80%" height="80%"></p>
**Face Recognition** - [`Demo`](https://youtu.be/WnUVYQP4h44)
Notice that face recognition module of insightface project is [ArcFace](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/), and face detection module is RetinaFace. ArcFace and RetinaFace pair is wrapped in [deepface](https://github.com/serengil/deepface) library for Python. Consider to use deepface if you need an end-to-end face recognition pipeline.
```python
#!pip install deepface
from deepface import DeepFace
obj = DeepFace.verify("img1.jpg", "img2.jpg"
, model_name = 'ArcFace', detector_backend = 'retinaface')
print(obj["verified"])
```
<p align="center"><img src="https://raw.githubusercontent.com/serengil/retinaface/master/tests/outputs/retinaface-arcface.png" width="100%" height="100%"></p>
Notice that ArcFace got 99.40% accuracy on [LFW data set](https://sefiks.com/2020/08/27/labeled-faces-in-the-wild-for-face-recognition/) whereas human beings just have 97.53% confidence.
## Support
There are many ways to support a project. Starring⭐️ the repo is just one 🙏
You can also support this work on [Patreon](https://www.patreon.com/serengil?repo=retinaface)
<a href="https://www.patreon.com/serengil?repo=retinaface">
<img src="https://raw.githubusercontent.com/serengil/retinaface/master/icons/patreon.png" width="30%" height="30%">
</a>
## Acknowledgements
This work is mainly based on the [insightface](https://github.com/deepinsight/insightface) project and [retinaface](https://arxiv.org/pdf/1905.00641.pdf) paper; and it is heavily inspired by the re-implementation of [retinaface-tf2](https://github.com/StanislasBertrand/RetinaFace-tf2) by [Stanislas Bertrand](https://github.com/StanislasBertrand). Finally, Bertrand's [implementation](https://github.com/StanislasBertrand/RetinaFace-tf2/blob/master/rcnn/cython/cpu_nms.pyx) uses [Fast R-CNN](https://arxiv.org/abs/1504.08083) written by [Ross Girshick](https://github.com/rbgirshick/fast-rcnn) in the background. All of those reference studies are licensed under MIT license.
## Citation
If you are using RetinaFace in your research, please consider citing its [original research paper](https://arxiv.org/abs/1905.00641). Besides, if you are using this re-implementation of retinaface, please consider citing the following research papers as well. Here are examples of BibTeX entries:
```BibTeX
@inproceedings{serengil2020lightface,
title = {LightFace: A Hybrid Deep Face Recognition Framework},
author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
booktitle = {2020 Innovations in Intelligent Systems and Applications Conference (ASYU)},
pages = {23-27},
year = {2020},
doi = {10.1109/ASYU50717.2020.9259802},
url = {https://doi.org/10.1109/ASYU50717.2020.9259802},
organization = {IEEE}
}
```
```BibTeX
@inproceedings{serengil2021lightface,
title = {HyperExtended LightFace: A Facial Attribute Analysis Framework},
author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
booktitle = {2021 International Conference on Engineering and Emerging Technologies (ICEET)},
pages = {1-4},
year = {2021},
doi = {10.1109/ICEET53442.2021.9659697},
url = {https://doi.org/10.1109/ICEET53442.2021.9659697},
organization = {IEEE}
}
```
Finally, if you use this RetinaFace re-implementation in your GitHub projects, please add the retina-face dependency to your requirements.txt.
## License
This project is licensed under the MIT License - see [`LICENSE`](https://github.com/serengil/retinaface/blob/master/LICENSE) for more details.
| /retina-face-0.0.13.tar.gz/retina-face-0.0.13/README.md | 0.714927 | 0.886862 | README.md | pypi |
import tensorflow as tf
import gdown
from pathlib import Path
import os
tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
from keras.models import Model
from keras.layers import Input, BatchNormalization, ZeroPadding2D, Conv2D, ReLU, MaxPool2D, Add, UpSampling2D, concatenate, Softmax
else:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, BatchNormalization, ZeroPadding2D, Conv2D, ReLU, MaxPool2D, Add, UpSampling2D, concatenate, Softmax
def load_weights(model):
    """Download (if necessary) the pre-trained RetinaFace weights and load them.

    The weight file is stored under ``$DEEPFACE_HOME/.deepface/weights`` (the
    environment variable defaults to the user's home directory).

    Args:
        model: A Keras model with the RetinaFace architecture.

    Returns:
        The same model with the pre-trained weights loaded.

    Raises:
        ValueError: If the weight file could not be downloaded.
    """
    home = str(os.getenv('DEEPFACE_HOME', default=Path.home()))
    exact_file = home + '/.deepface/weights/retinaface.h5'
    url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/retinaface.h5'

    # -----------------------------
    # makedirs creates intermediate directories as needed and exist_ok avoids
    # the racy exists()-then-mkdir pattern.
    os.makedirs(home + "/.deepface/weights", exist_ok=True)

    # -----------------------------
    if not os.path.isfile(exact_file):
        print("retinaface.h5 will be downloaded from the url " + url)
        gdown.download(url, exact_file, quiet=False)

    # -----------------------------
    # gdown should have downloaded the weights; if the file still does not
    # exist, fail with a single, well-formed error message.
    if not os.path.isfile(exact_file):
        raise ValueError(
            "Pre-trained weight could not be loaded!"
            + " You might try to download the pre-trained weights from the url " + url
            + " and copy it to " + exact_file + " manually."
        )

    model.load_weights(exact_file)

    return model
def build_model():
data = Input(dtype=tf.float32, shape=(None, None, 3), name='data')
bn_data = BatchNormalization(epsilon=1.9999999494757503e-05, name='bn_data', trainable=False)(data)
conv0_pad = ZeroPadding2D(padding=tuple([3, 3]))(bn_data)
conv0 = Conv2D(filters = 64, kernel_size = (7, 7), name = 'conv0', strides = [2, 2], padding = 'VALID', use_bias = False)(conv0_pad)
bn0 = BatchNormalization(epsilon=1.9999999494757503e-05, name='bn0', trainable=False)(conv0)
relu0 = ReLU(name='relu0')(bn0)
pooling0_pad = ZeroPadding2D(padding=tuple([1, 1]))(relu0)
pooling0 = MaxPool2D((3, 3), (2, 2), padding='VALID', name='pooling0')(pooling0_pad)
stage1_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit1_bn1', trainable=False)(pooling0)
stage1_unit1_relu1 = ReLU(name='stage1_unit1_relu1')(stage1_unit1_bn1)
stage1_unit1_conv1 = Conv2D(filters = 64, kernel_size = (1, 1), name = 'stage1_unit1_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit1_relu1)
stage1_unit1_sc = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage1_unit1_sc', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit1_relu1)
stage1_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit1_bn2', trainable=False)(stage1_unit1_conv1)
stage1_unit1_relu2 = ReLU(name='stage1_unit1_relu2')(stage1_unit1_bn2)
stage1_unit1_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage1_unit1_relu2)
stage1_unit1_conv2 = Conv2D(filters = 64, kernel_size = (3, 3), name = 'stage1_unit1_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit1_conv2_pad)
stage1_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit1_bn3', trainable=False)(stage1_unit1_conv2)
stage1_unit1_relu3 = ReLU(name='stage1_unit1_relu3')(stage1_unit1_bn3)
stage1_unit1_conv3 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage1_unit1_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit1_relu3)
plus0_v1 = Add()([stage1_unit1_conv3 , stage1_unit1_sc])
stage1_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit2_bn1', trainable=False)(plus0_v1)
stage1_unit2_relu1 = ReLU(name='stage1_unit2_relu1')(stage1_unit2_bn1)
stage1_unit2_conv1 = Conv2D(filters = 64, kernel_size = (1, 1), name = 'stage1_unit2_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit2_relu1)
stage1_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit2_bn2', trainable=False)(stage1_unit2_conv1)
stage1_unit2_relu2 = ReLU(name='stage1_unit2_relu2')(stage1_unit2_bn2)
stage1_unit2_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage1_unit2_relu2)
stage1_unit2_conv2 = Conv2D(filters = 64, kernel_size = (3, 3), name = 'stage1_unit2_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit2_conv2_pad)
stage1_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit2_bn3', trainable=False)(stage1_unit2_conv2)
stage1_unit2_relu3 = ReLU(name='stage1_unit2_relu3')(stage1_unit2_bn3)
stage1_unit2_conv3 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage1_unit2_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit2_relu3)
plus1_v2 = Add()([stage1_unit2_conv3 , plus0_v1])
stage1_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit3_bn1', trainable=False)(plus1_v2)
stage1_unit3_relu1 = ReLU(name='stage1_unit3_relu1')(stage1_unit3_bn1)
stage1_unit3_conv1 = Conv2D(filters = 64, kernel_size = (1, 1), name = 'stage1_unit3_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit3_relu1)
stage1_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit3_bn2', trainable=False)(stage1_unit3_conv1)
stage1_unit3_relu2 = ReLU(name='stage1_unit3_relu2')(stage1_unit3_bn2)
stage1_unit3_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage1_unit3_relu2)
stage1_unit3_conv2 = Conv2D(filters = 64, kernel_size = (3, 3), name = 'stage1_unit3_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit3_conv2_pad)
stage1_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit3_bn3', trainable=False)(stage1_unit3_conv2)
stage1_unit3_relu3 = ReLU(name='stage1_unit3_relu3')(stage1_unit3_bn3)
stage1_unit3_conv3 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage1_unit3_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage1_unit3_relu3)
plus2 = Add()([stage1_unit3_conv3 , plus1_v2])
stage2_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit1_bn1', trainable=False)(plus2)
stage2_unit1_relu1 = ReLU(name='stage2_unit1_relu1')(stage2_unit1_bn1)
stage2_unit1_conv1 = Conv2D(filters = 128, kernel_size = (1, 1), name = 'stage2_unit1_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit1_relu1)
stage2_unit1_sc = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage2_unit1_sc', strides = [2, 2], padding = 'VALID', use_bias = False)(stage2_unit1_relu1)
stage2_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit1_bn2', trainable=False)(stage2_unit1_conv1)
stage2_unit1_relu2 = ReLU(name='stage2_unit1_relu2')(stage2_unit1_bn2)
stage2_unit1_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage2_unit1_relu2)
stage2_unit1_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'stage2_unit1_conv2', strides = [2, 2], padding = 'VALID', use_bias = False)(stage2_unit1_conv2_pad)
stage2_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit1_bn3', trainable=False)(stage2_unit1_conv2)
stage2_unit1_relu3 = ReLU(name='stage2_unit1_relu3')(stage2_unit1_bn3)
stage2_unit1_conv3 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage2_unit1_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit1_relu3)
plus3 = Add()([stage2_unit1_conv3 , stage2_unit1_sc])
stage2_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit2_bn1', trainable=False)(plus3)
stage2_unit2_relu1 = ReLU(name='stage2_unit2_relu1')(stage2_unit2_bn1)
stage2_unit2_conv1 = Conv2D(filters = 128, kernel_size = (1, 1), name = 'stage2_unit2_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit2_relu1)
stage2_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit2_bn2', trainable=False)(stage2_unit2_conv1)
stage2_unit2_relu2 = ReLU(name='stage2_unit2_relu2')(stage2_unit2_bn2)
stage2_unit2_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage2_unit2_relu2)
stage2_unit2_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'stage2_unit2_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit2_conv2_pad)
stage2_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit2_bn3', trainable=False)(stage2_unit2_conv2)
stage2_unit2_relu3 = ReLU(name='stage2_unit2_relu3')(stage2_unit2_bn3)
stage2_unit2_conv3 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage2_unit2_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit2_relu3)
plus4 = Add()([stage2_unit2_conv3 , plus3])
stage2_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit3_bn1', trainable=False)(plus4)
stage2_unit3_relu1 = ReLU(name='stage2_unit3_relu1')(stage2_unit3_bn1)
stage2_unit3_conv1 = Conv2D(filters = 128, kernel_size = (1, 1), name = 'stage2_unit3_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit3_relu1)
stage2_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit3_bn2', trainable=False)(stage2_unit3_conv1)
stage2_unit3_relu2 = ReLU(name='stage2_unit3_relu2')(stage2_unit3_bn2)
stage2_unit3_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage2_unit3_relu2)
stage2_unit3_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'stage2_unit3_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit3_conv2_pad)
stage2_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit3_bn3', trainable=False)(stage2_unit3_conv2)
stage2_unit3_relu3 = ReLU(name='stage2_unit3_relu3')(stage2_unit3_bn3)
stage2_unit3_conv3 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage2_unit3_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit3_relu3)
plus5 = Add()([stage2_unit3_conv3 , plus4])
stage2_unit4_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit4_bn1', trainable=False)(plus5)
stage2_unit4_relu1 = ReLU(name='stage2_unit4_relu1')(stage2_unit4_bn1)
stage2_unit4_conv1 = Conv2D(filters = 128, kernel_size = (1, 1), name = 'stage2_unit4_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit4_relu1)
stage2_unit4_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit4_bn2', trainable=False)(stage2_unit4_conv1)
stage2_unit4_relu2 = ReLU(name='stage2_unit4_relu2')(stage2_unit4_bn2)
stage2_unit4_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage2_unit4_relu2)
stage2_unit4_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'stage2_unit4_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit4_conv2_pad)
stage2_unit4_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit4_bn3', trainable=False)(stage2_unit4_conv2)
stage2_unit4_relu3 = ReLU(name='stage2_unit4_relu3')(stage2_unit4_bn3)
stage2_unit4_conv3 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage2_unit4_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage2_unit4_relu3)
plus6 = Add()([stage2_unit4_conv3 , plus5])
stage3_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit1_bn1', trainable=False)(plus6)
stage3_unit1_relu1 = ReLU(name='stage3_unit1_relu1')(stage3_unit1_bn1)
stage3_unit1_conv1 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage3_unit1_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit1_relu1)
stage3_unit1_sc = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit1_sc', strides = [2, 2], padding = 'VALID', use_bias = False)(stage3_unit1_relu1)
stage3_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit1_bn2', trainable=False)(stage3_unit1_conv1)
stage3_unit1_relu2 = ReLU(name='stage3_unit1_relu2')(stage3_unit1_bn2)
stage3_unit1_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage3_unit1_relu2)
stage3_unit1_conv2 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'stage3_unit1_conv2', strides = [2, 2], padding = 'VALID', use_bias = False)(stage3_unit1_conv2_pad)
ssh_m1_red_conv = Conv2D(filters = 256, kernel_size = (1, 1), name = 'ssh_m1_red_conv', strides = [1, 1], padding = 'VALID', use_bias = True)(stage3_unit1_relu2)
stage3_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit1_bn3', trainable=False)(stage3_unit1_conv2)
ssh_m1_red_conv_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_red_conv_bn', trainable=False)(ssh_m1_red_conv)
stage3_unit1_relu3 = ReLU(name='stage3_unit1_relu3')(stage3_unit1_bn3)
ssh_m1_red_conv_relu = ReLU(name='ssh_m1_red_conv_relu')(ssh_m1_red_conv_bn)
stage3_unit1_conv3 = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit1_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit1_relu3)
plus7 = Add()([stage3_unit1_conv3 , stage3_unit1_sc])
stage3_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit2_bn1', trainable=False)(plus7)
stage3_unit2_relu1 = ReLU(name='stage3_unit2_relu1')(stage3_unit2_bn1)
stage3_unit2_conv1 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage3_unit2_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit2_relu1)
stage3_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit2_bn2', trainable=False)(stage3_unit2_conv1)
stage3_unit2_relu2 = ReLU(name='stage3_unit2_relu2')(stage3_unit2_bn2)
stage3_unit2_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage3_unit2_relu2)
stage3_unit2_conv2 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'stage3_unit2_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit2_conv2_pad)
stage3_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit2_bn3', trainable=False)(stage3_unit2_conv2)
stage3_unit2_relu3 = ReLU(name='stage3_unit2_relu3')(stage3_unit2_bn3)
stage3_unit2_conv3 = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit2_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit2_relu3)
plus8 = Add()([stage3_unit2_conv3 , plus7])
stage3_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit3_bn1', trainable=False)(plus8)
stage3_unit3_relu1 = ReLU(name='stage3_unit3_relu1')(stage3_unit3_bn1)
stage3_unit3_conv1 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage3_unit3_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit3_relu1)
stage3_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit3_bn2', trainable=False)(stage3_unit3_conv1)
stage3_unit3_relu2 = ReLU(name='stage3_unit3_relu2')(stage3_unit3_bn2)
stage3_unit3_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage3_unit3_relu2)
stage3_unit3_conv2 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'stage3_unit3_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit3_conv2_pad)
stage3_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit3_bn3', trainable=False)(stage3_unit3_conv2)
stage3_unit3_relu3 = ReLU(name='stage3_unit3_relu3')(stage3_unit3_bn3)
stage3_unit3_conv3 = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit3_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit3_relu3)
plus9 = Add()([stage3_unit3_conv3 , plus8])
stage3_unit4_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit4_bn1', trainable=False)(plus9)
stage3_unit4_relu1 = ReLU(name='stage3_unit4_relu1')(stage3_unit4_bn1)
stage3_unit4_conv1 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage3_unit4_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit4_relu1)
stage3_unit4_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit4_bn2', trainable=False)(stage3_unit4_conv1)
stage3_unit4_relu2 = ReLU(name='stage3_unit4_relu2')(stage3_unit4_bn2)
stage3_unit4_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage3_unit4_relu2)
stage3_unit4_conv2 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'stage3_unit4_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit4_conv2_pad)
stage3_unit4_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit4_bn3', trainable=False)(stage3_unit4_conv2)
stage3_unit4_relu3 = ReLU(name='stage3_unit4_relu3')(stage3_unit4_bn3)
stage3_unit4_conv3 = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit4_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit4_relu3)
plus10 = Add()([stage3_unit4_conv3 , plus9])
stage3_unit5_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit5_bn1', trainable=False)(plus10)
stage3_unit5_relu1 = ReLU(name='stage3_unit5_relu1')(stage3_unit5_bn1)
stage3_unit5_conv1 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage3_unit5_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit5_relu1)
stage3_unit5_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit5_bn2', trainable=False)(stage3_unit5_conv1)
stage3_unit5_relu2 = ReLU(name='stage3_unit5_relu2')(stage3_unit5_bn2)
stage3_unit5_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage3_unit5_relu2)
stage3_unit5_conv2 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'stage3_unit5_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit5_conv2_pad)
stage3_unit5_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit5_bn3', trainable=False)(stage3_unit5_conv2)
stage3_unit5_relu3 = ReLU(name='stage3_unit5_relu3')(stage3_unit5_bn3)
stage3_unit5_conv3 = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit5_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit5_relu3)
plus11 = Add()([stage3_unit5_conv3 , plus10])
stage3_unit6_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit6_bn1', trainable=False)(plus11)
stage3_unit6_relu1 = ReLU(name='stage3_unit6_relu1')(stage3_unit6_bn1)
stage3_unit6_conv1 = Conv2D(filters = 256, kernel_size = (1, 1), name = 'stage3_unit6_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit6_relu1)
stage3_unit6_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit6_bn2', trainable=False)(stage3_unit6_conv1)
stage3_unit6_relu2 = ReLU(name='stage3_unit6_relu2')(stage3_unit6_bn2)
stage3_unit6_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage3_unit6_relu2)
stage3_unit6_conv2 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'stage3_unit6_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit6_conv2_pad)
stage3_unit6_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit6_bn3', trainable=False)(stage3_unit6_conv2)
stage3_unit6_relu3 = ReLU(name='stage3_unit6_relu3')(stage3_unit6_bn3)
stage3_unit6_conv3 = Conv2D(filters = 1024, kernel_size = (1, 1), name = 'stage3_unit6_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage3_unit6_relu3)
plus12 = Add()([stage3_unit6_conv3 , plus11])
stage4_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit1_bn1', trainable=False)(plus12)
stage4_unit1_relu1 = ReLU(name='stage4_unit1_relu1')(stage4_unit1_bn1)
stage4_unit1_conv1 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage4_unit1_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit1_relu1)
stage4_unit1_sc = Conv2D(filters = 2048, kernel_size = (1, 1), name = 'stage4_unit1_sc', strides = [2, 2], padding = 'VALID', use_bias = False)(stage4_unit1_relu1)
stage4_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit1_bn2', trainable=False)(stage4_unit1_conv1)
stage4_unit1_relu2 = ReLU(name='stage4_unit1_relu2')(stage4_unit1_bn2)
stage4_unit1_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage4_unit1_relu2)
stage4_unit1_conv2 = Conv2D(filters = 512, kernel_size = (3, 3), name = 'stage4_unit1_conv2', strides = [2, 2], padding = 'VALID', use_bias = False)(stage4_unit1_conv2_pad)
ssh_c2_lateral = Conv2D(filters = 256, kernel_size = (1, 1), name = 'ssh_c2_lateral', strides = [1, 1], padding = 'VALID', use_bias = True)(stage4_unit1_relu2)
stage4_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit1_bn3', trainable=False)(stage4_unit1_conv2)
ssh_c2_lateral_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c2_lateral_bn', trainable=False)(ssh_c2_lateral)
stage4_unit1_relu3 = ReLU(name='stage4_unit1_relu3')(stage4_unit1_bn3)
ssh_c2_lateral_relu = ReLU(name='ssh_c2_lateral_relu')(ssh_c2_lateral_bn)
stage4_unit1_conv3 = Conv2D(filters = 2048, kernel_size = (1, 1), name = 'stage4_unit1_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit1_relu3)
plus13 = Add()([stage4_unit1_conv3 , stage4_unit1_sc])
stage4_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit2_bn1', trainable=False)(plus13)
stage4_unit2_relu1 = ReLU(name='stage4_unit2_relu1')(stage4_unit2_bn1)
stage4_unit2_conv1 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage4_unit2_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit2_relu1)
stage4_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit2_bn2', trainable=False)(stage4_unit2_conv1)
stage4_unit2_relu2 = ReLU(name='stage4_unit2_relu2')(stage4_unit2_bn2)
stage4_unit2_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage4_unit2_relu2)
stage4_unit2_conv2 = Conv2D(filters = 512, kernel_size = (3, 3), name = 'stage4_unit2_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit2_conv2_pad)
stage4_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit2_bn3', trainable=False)(stage4_unit2_conv2)
stage4_unit2_relu3 = ReLU(name='stage4_unit2_relu3')(stage4_unit2_bn3)
stage4_unit2_conv3 = Conv2D(filters = 2048, kernel_size = (1, 1), name = 'stage4_unit2_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit2_relu3)
plus14 = Add()([stage4_unit2_conv3 , plus13])
stage4_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit3_bn1', trainable=False)(plus14)
stage4_unit3_relu1 = ReLU(name='stage4_unit3_relu1')(stage4_unit3_bn1)
stage4_unit3_conv1 = Conv2D(filters = 512, kernel_size = (1, 1), name = 'stage4_unit3_conv1', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit3_relu1)
stage4_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit3_bn2', trainable=False)(stage4_unit3_conv1)
stage4_unit3_relu2 = ReLU(name='stage4_unit3_relu2')(stage4_unit3_bn2)
stage4_unit3_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(stage4_unit3_relu2)
stage4_unit3_conv2 = Conv2D(filters = 512, kernel_size = (3, 3), name = 'stage4_unit3_conv2', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit3_conv2_pad)
stage4_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit3_bn3', trainable=False)(stage4_unit3_conv2)
stage4_unit3_relu3 = ReLU(name='stage4_unit3_relu3')(stage4_unit3_bn3)
stage4_unit3_conv3 = Conv2D(filters = 2048, kernel_size = (1, 1), name = 'stage4_unit3_conv3', strides = [1, 1], padding = 'VALID', use_bias = False)(stage4_unit3_relu3)
plus15 = Add()([stage4_unit3_conv3 , plus14])
bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='bn1', trainable=False)(plus15)
relu1 = ReLU(name='relu1')(bn1)
ssh_c3_lateral = Conv2D(filters = 256, kernel_size = (1, 1), name = 'ssh_c3_lateral', strides = [1, 1], padding = 'VALID', use_bias = True)(relu1)
ssh_c3_lateral_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c3_lateral_bn', trainable=False)(ssh_c3_lateral)
ssh_c3_lateral_relu = ReLU(name='ssh_c3_lateral_relu')(ssh_c3_lateral_bn)
ssh_m3_det_conv1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_c3_lateral_relu)
ssh_m3_det_conv1 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'ssh_m3_det_conv1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_conv1_pad)
ssh_m3_det_context_conv1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_c3_lateral_relu)
ssh_m3_det_context_conv1 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m3_det_context_conv1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_context_conv1_pad)
ssh_c3_up = UpSampling2D(size=(2, 2), interpolation="nearest", name="ssh_c3_up")(ssh_c3_lateral_relu)
ssh_m3_det_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_conv1_bn', trainable=False)(ssh_m3_det_conv1)
ssh_m3_det_context_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv1_bn', trainable=False)(ssh_m3_det_context_conv1)
x1_shape = tf.shape(ssh_c3_up)
x2_shape = tf.shape(ssh_c2_lateral_relu)
offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
size = [-1, x2_shape[1], x2_shape[2], -1]
crop0 = tf.slice(ssh_c3_up, offsets, size, "crop0")
ssh_m3_det_context_conv1_relu = ReLU(name='ssh_m3_det_context_conv1_relu')(ssh_m3_det_context_conv1_bn)
plus0_v2 = Add()([ssh_c2_lateral_relu , crop0])
ssh_m3_det_context_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m3_det_context_conv1_relu)
ssh_m3_det_context_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m3_det_context_conv2', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_context_conv2_pad)
ssh_m3_det_context_conv3_1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m3_det_context_conv1_relu)
ssh_m3_det_context_conv3_1 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m3_det_context_conv3_1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_context_conv3_1_pad)
ssh_c2_aggr_pad = ZeroPadding2D(padding=tuple([1, 1]))(plus0_v2)
ssh_c2_aggr = Conv2D(filters = 256, kernel_size = (3, 3), name = 'ssh_c2_aggr', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_c2_aggr_pad)
ssh_m3_det_context_conv2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv2_bn', trainable=False)(ssh_m3_det_context_conv2)
ssh_m3_det_context_conv3_1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv3_1_bn', trainable=False)(ssh_m3_det_context_conv3_1)
ssh_c2_aggr_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c2_aggr_bn', trainable=False)(ssh_c2_aggr)
ssh_m3_det_context_conv3_1_relu = ReLU(name='ssh_m3_det_context_conv3_1_relu')(ssh_m3_det_context_conv3_1_bn)
ssh_c2_aggr_relu = ReLU(name='ssh_c2_aggr_relu')(ssh_c2_aggr_bn)
ssh_m3_det_context_conv3_2_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m3_det_context_conv3_1_relu)
ssh_m3_det_context_conv3_2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m3_det_context_conv3_2', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_context_conv3_2_pad)
ssh_m2_det_conv1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_c2_aggr_relu)
ssh_m2_det_conv1 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'ssh_m2_det_conv1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_conv1_pad)
ssh_m2_det_context_conv1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_c2_aggr_relu)
ssh_m2_det_context_conv1 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m2_det_context_conv1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_context_conv1_pad)
ssh_m2_red_up = UpSampling2D(size=(2, 2), interpolation="nearest", name="ssh_m2_red_up")(ssh_c2_aggr_relu)
ssh_m3_det_context_conv3_2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv3_2_bn', trainable=False)(ssh_m3_det_context_conv3_2)
ssh_m2_det_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_conv1_bn', trainable=False)(ssh_m2_det_conv1)
ssh_m2_det_context_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv1_bn', trainable=False)(ssh_m2_det_context_conv1)
x1_shape = tf.shape(ssh_m2_red_up)
x2_shape = tf.shape(ssh_m1_red_conv_relu)
offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
size = [-1, x2_shape[1], x2_shape[2], -1]
crop1 = tf.slice(ssh_m2_red_up, offsets, size, "crop1")
ssh_m3_det_concat = concatenate([ssh_m3_det_conv1_bn, ssh_m3_det_context_conv2_bn, ssh_m3_det_context_conv3_2_bn], 3, name='ssh_m3_det_concat')
ssh_m2_det_context_conv1_relu = ReLU(name='ssh_m2_det_context_conv1_relu')(ssh_m2_det_context_conv1_bn)
plus1_v1 = Add()([ssh_m1_red_conv_relu , crop1])
ssh_m3_det_concat_relu = ReLU(name='ssh_m3_det_concat_relu')(ssh_m3_det_concat)
ssh_m2_det_context_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m2_det_context_conv1_relu)
ssh_m2_det_context_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m2_det_context_conv2', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_context_conv2_pad)
ssh_m2_det_context_conv3_1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m2_det_context_conv1_relu)
ssh_m2_det_context_conv3_1 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m2_det_context_conv3_1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_context_conv3_1_pad)
ssh_c1_aggr_pad = ZeroPadding2D(padding=tuple([1, 1]))(plus1_v1)
ssh_c1_aggr = Conv2D(filters = 256, kernel_size = (3, 3), name = 'ssh_c1_aggr', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_c1_aggr_pad)
face_rpn_cls_score_stride32 = Conv2D(filters = 4, kernel_size = (1, 1), name = 'face_rpn_cls_score_stride32', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_concat_relu)
inter_1 = concatenate([face_rpn_cls_score_stride32[:, :, :, 0], face_rpn_cls_score_stride32[:, :, :, 1]], axis=1)
inter_2 = concatenate([face_rpn_cls_score_stride32[:, :, :, 2], face_rpn_cls_score_stride32[:, :, :, 3]], axis=1)
final = tf.stack([inter_1, inter_2])
face_rpn_cls_score_reshape_stride32 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_score_reshape_stride32")
face_rpn_bbox_pred_stride32 = Conv2D(filters = 8, kernel_size = (1, 1), name = 'face_rpn_bbox_pred_stride32', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_concat_relu)
face_rpn_landmark_pred_stride32 = Conv2D(filters = 20, kernel_size = (1, 1), name = 'face_rpn_landmark_pred_stride32', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m3_det_concat_relu)
ssh_m2_det_context_conv2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv2_bn', trainable=False)(ssh_m2_det_context_conv2)
ssh_m2_det_context_conv3_1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv3_1_bn', trainable=False)(ssh_m2_det_context_conv3_1)
ssh_c1_aggr_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c1_aggr_bn', trainable=False)(ssh_c1_aggr)
ssh_m2_det_context_conv3_1_relu = ReLU(name='ssh_m2_det_context_conv3_1_relu')(ssh_m2_det_context_conv3_1_bn)
ssh_c1_aggr_relu = ReLU(name='ssh_c1_aggr_relu')(ssh_c1_aggr_bn)
face_rpn_cls_prob_stride32 = Softmax(name = 'face_rpn_cls_prob_stride32')(face_rpn_cls_score_reshape_stride32)
input_shape = [tf.shape(face_rpn_cls_prob_stride32)[k] for k in range(4)]
sz = tf.dtypes.cast(input_shape[1] / 2, dtype=tf.int32)
inter_1 = face_rpn_cls_prob_stride32[:, 0:sz, :, 0]
inter_2 = face_rpn_cls_prob_stride32[:, 0:sz, :, 1]
inter_3 = face_rpn_cls_prob_stride32[:, sz:, :, 0]
inter_4 = face_rpn_cls_prob_stride32[:, sz:, :, 1]
final = tf.stack([inter_1, inter_3, inter_2, inter_4])
face_rpn_cls_prob_reshape_stride32 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_prob_reshape_stride32")
ssh_m2_det_context_conv3_2_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m2_det_context_conv3_1_relu)
ssh_m2_det_context_conv3_2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m2_det_context_conv3_2', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_context_conv3_2_pad)
ssh_m1_det_conv1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_c1_aggr_relu)
ssh_m1_det_conv1 = Conv2D(filters = 256, kernel_size = (3, 3), name = 'ssh_m1_det_conv1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_conv1_pad)
ssh_m1_det_context_conv1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_c1_aggr_relu)
ssh_m1_det_context_conv1 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m1_det_context_conv1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_context_conv1_pad)
ssh_m2_det_context_conv3_2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv3_2_bn', trainable=False)(ssh_m2_det_context_conv3_2)
ssh_m1_det_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_conv1_bn', trainable=False)(ssh_m1_det_conv1)
ssh_m1_det_context_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv1_bn', trainable=False)(ssh_m1_det_context_conv1)
ssh_m2_det_concat = concatenate([ssh_m2_det_conv1_bn, ssh_m2_det_context_conv2_bn, ssh_m2_det_context_conv3_2_bn], 3, name='ssh_m2_det_concat')
ssh_m1_det_context_conv1_relu = ReLU(name='ssh_m1_det_context_conv1_relu')(ssh_m1_det_context_conv1_bn)
ssh_m2_det_concat_relu = ReLU(name='ssh_m2_det_concat_relu')(ssh_m2_det_concat)
ssh_m1_det_context_conv2_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m1_det_context_conv1_relu)
ssh_m1_det_context_conv2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m1_det_context_conv2', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_context_conv2_pad)
ssh_m1_det_context_conv3_1_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m1_det_context_conv1_relu)
ssh_m1_det_context_conv3_1 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m1_det_context_conv3_1', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_context_conv3_1_pad)
face_rpn_cls_score_stride16 = Conv2D(filters = 4, kernel_size = (1, 1), name = 'face_rpn_cls_score_stride16', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_concat_relu)
inter_1 = concatenate([face_rpn_cls_score_stride16[:, :, :, 0], face_rpn_cls_score_stride16[:, :, :, 1]], axis=1)
inter_2 = concatenate([face_rpn_cls_score_stride16[:, :, :, 2], face_rpn_cls_score_stride16[:, :, :, 3]], axis=1)
final = tf.stack([inter_1, inter_2])
face_rpn_cls_score_reshape_stride16 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_score_reshape_stride16")
face_rpn_bbox_pred_stride16 = Conv2D(filters = 8, kernel_size = (1, 1), name = 'face_rpn_bbox_pred_stride16', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_concat_relu)
face_rpn_landmark_pred_stride16 = Conv2D(filters = 20, kernel_size = (1, 1), name = 'face_rpn_landmark_pred_stride16', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m2_det_concat_relu)
ssh_m1_det_context_conv2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv2_bn', trainable=False)(ssh_m1_det_context_conv2)
ssh_m1_det_context_conv3_1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv3_1_bn', trainable=False)(ssh_m1_det_context_conv3_1)
ssh_m1_det_context_conv3_1_relu = ReLU(name='ssh_m1_det_context_conv3_1_relu')(ssh_m1_det_context_conv3_1_bn)
face_rpn_cls_prob_stride16 = Softmax(name = 'face_rpn_cls_prob_stride16')(face_rpn_cls_score_reshape_stride16)
input_shape = [tf.shape(face_rpn_cls_prob_stride16)[k] for k in range(4)]
sz = tf.dtypes.cast(input_shape[1] / 2, dtype=tf.int32)
inter_1 = face_rpn_cls_prob_stride16[:, 0:sz, :, 0]
inter_2 = face_rpn_cls_prob_stride16[:, 0:sz, :, 1]
inter_3 = face_rpn_cls_prob_stride16[:, sz:, :, 0]
inter_4 = face_rpn_cls_prob_stride16[:, sz:, :, 1]
final = tf.stack([inter_1, inter_3, inter_2, inter_4])
face_rpn_cls_prob_reshape_stride16 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_prob_reshape_stride16")
ssh_m1_det_context_conv3_2_pad = ZeroPadding2D(padding=tuple([1, 1]))(ssh_m1_det_context_conv3_1_relu)
ssh_m1_det_context_conv3_2 = Conv2D(filters = 128, kernel_size = (3, 3), name = 'ssh_m1_det_context_conv3_2', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_context_conv3_2_pad)
ssh_m1_det_context_conv3_2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv3_2_bn', trainable=False)(ssh_m1_det_context_conv3_2)
ssh_m1_det_concat = concatenate([ssh_m1_det_conv1_bn, ssh_m1_det_context_conv2_bn, ssh_m1_det_context_conv3_2_bn], 3, name='ssh_m1_det_concat')
ssh_m1_det_concat_relu = ReLU(name='ssh_m1_det_concat_relu')(ssh_m1_det_concat)
face_rpn_cls_score_stride8 = Conv2D(filters = 4, kernel_size = (1, 1), name = 'face_rpn_cls_score_stride8', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_concat_relu)
inter_1 = concatenate([face_rpn_cls_score_stride8[:, :, :, 0], face_rpn_cls_score_stride8[:, :, :, 1]], axis=1)
inter_2 = concatenate([face_rpn_cls_score_stride8[:, :, :, 2], face_rpn_cls_score_stride8[:, :, :, 3]], axis=1)
final = tf.stack([inter_1, inter_2])
face_rpn_cls_score_reshape_stride8 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_score_reshape_stride8")
face_rpn_bbox_pred_stride8 = Conv2D(filters = 8, kernel_size = (1, 1), name = 'face_rpn_bbox_pred_stride8', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_concat_relu)
face_rpn_landmark_pred_stride8 = Conv2D(filters = 20, kernel_size = (1, 1), name = 'face_rpn_landmark_pred_stride8', strides = [1, 1], padding = 'VALID', use_bias = True)(ssh_m1_det_concat_relu)
face_rpn_cls_prob_stride8 = Softmax(name = 'face_rpn_cls_prob_stride8')(face_rpn_cls_score_reshape_stride8)
input_shape = [tf.shape(face_rpn_cls_prob_stride8)[k] for k in range(4)]
sz = tf.dtypes.cast(input_shape[1] / 2, dtype=tf.int32)
inter_1 = face_rpn_cls_prob_stride8[:, 0:sz, :, 0]
inter_2 = face_rpn_cls_prob_stride8[:, 0:sz, :, 1]
inter_3 = face_rpn_cls_prob_stride8[:, sz:, :, 0]
inter_4 = face_rpn_cls_prob_stride8[:, sz:, :, 1]
final = tf.stack([inter_1, inter_3, inter_2, inter_4])
face_rpn_cls_prob_reshape_stride8 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_prob_reshape_stride8")
model = Model(inputs=data,
outputs=[face_rpn_cls_prob_reshape_stride32,
face_rpn_bbox_pred_stride32,
face_rpn_landmark_pred_stride32,
face_rpn_cls_prob_reshape_stride16,
face_rpn_bbox_pred_stride16,
face_rpn_landmark_pred_stride16,
face_rpn_cls_prob_reshape_stride8,
face_rpn_bbox_pred_stride8,
face_rpn_landmark_pred_stride8
])
model = load_weights(model)
return model | /retina-face-0.0.13.tar.gz/retina-face-0.0.13/retinaface/model/retinaface_model.py | 0.664867 | 0.345547 | retinaface_model.py | pypi |
import re
from itertools import product
from typing import List, Tuple
import numpy as np
def nms(x1: np.ndarray, y1: np.ndarray, x2: np.ndarray, y2: np.ndarray,
        scores: np.ndarray, thresh: float) -> List[int]:
    """Greedy non-maximum suppression over axis-aligned boxes.

    Boxes are given as parallel coordinate arrays; a candidate is suppressed
    when its IoU with an already-kept, higher-scoring box exceeds ``thresh``.
    Returns the indices of the surviving boxes, best score first.
    """
    border = 1  # inclusive pixel coordinates: width = x2 - x1 + 1
    box_areas = (x2 - x1 + border) * (y2 - y1 + border)
    ranked = scores.argsort()[::-1]  # candidate indices, best score first
    selected: List[int] = []
    while ranked.size > 0:
        best, rest = ranked[0], ranked[1:]
        selected.append(best)
        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1 + border) * np.maximum(0.0, iy2 - iy1 + border)
        union = box_areas[best] + box_areas[rest] - inter
        # Guarded divide avoids warnings when a degenerate union is zero.
        iou = np.divide(inter, union, out=np.zeros_like(inter, dtype=float),
                        where=union != 0)
        ranked = rest[iou <= thresh]
    return selected
class RetinaFacePostPostprocessor:
    """Decodes raw RetinaFace outputs into scored boxes in original-image coordinates."""

    def __init__(self, origin_image_size: Tuple[int, int], input_image_size: Tuple[int, int]):
        # NOTE(review): size[0]/size[1] are used inconsistently across
        # generate_prior_data and scale_x/scale_y below — confirm the intended
        # (dim0, dim1) = (height, width) vs (width, height) convention.
        self._origin_image_size = origin_image_size
        self._input_image_size = input_image_size
        self.nms_threshold = 0.3          # IoU above which boxes are suppressed
        self.face_prob_threshold = 0.5    # minimum face score kept
        self.variance = [0.1, 0.2]        # SSD-style box-decoding variances

    def process_output(self, raw_output: np.ndarray):
        """Return [(x_min, y_min, x_max, y_max, score), ...] for detected faces.

        *raw_output* maps output-tensor name to a batched array; the bbox and
        class outputs are located by name.  (The annotation says np.ndarray,
        but the code iterates and indexes it like a dict — verify caller.)
        """
        # NOTE(review): the dots in '.bbox.' / '.cls.' are regex wildcards, so
        # this matches any name containing 'bbox'/'cls' with one character on
        # each side — presumably intended as a plain substring match.
        bboxes_output = [raw_output[name][0] for name in raw_output if re.search('.bbox.', name)][0]
        scores_output = [raw_output[name][0] for name in raw_output if re.search('.cls.', name)][0]
        prior_data = self.generate_prior_data()
        proposals = self._get_proposals(bboxes_output, prior_data)
        scores = scores_output[:, 1]  # column 1 = face-class probability
        # Drop low-confidence proposals before NMS.
        filter_idx = np.where(scores > self.face_prob_threshold)[0]
        proposals = proposals[filter_idx]
        scores = scores[filter_idx]
        if np.size(scores) > 0:
            x_mins, y_mins, x_maxs, y_maxs = proposals.T
            keep = nms(x_mins, y_mins, x_maxs, y_maxs, scores, self.nms_threshold)
            proposals = proposals[keep]
            scores = scores[keep]
        result = []
        if np.size(scores) != 0:
            scores = np.reshape(scores, -1)
            x_mins, y_mins, x_maxs, y_maxs = np.array(proposals).T
            for x_min, y_min, x_max, y_max, score in zip(x_mins, y_mins, x_maxs, y_maxs, scores):
                # Map from network-input coordinates back to the original image.
                x_min *= self.scale_x
                y_min *= self.scale_y
                x_max *= self.scale_x
                y_max *= self.scale_y
                result.append((x_min, y_min, x_max, y_max, score))
        return result

    def generate_prior_data(self):
        """Build the (N, 4) anchor priors as normalized (cx, cy, w, h)."""
        # Anchor sizes per pyramid level, paired with strides 8/16/32.
        global_min_sizes = [[16, 32], [64, 128], [256, 512]]
        steps = [8, 16, 32]
        anchors = []
        feature_maps = [[int(np.rint(self._input_image_size[1] / step)), int(np.rint(self._input_image_size[0] / step))]
                        for step in steps]
        for idx, feature_map in enumerate(feature_maps):
            min_sizes = global_min_sizes[idx]
            for i, j in product(range(feature_map[0]), range(feature_map[1])):
                for min_size in min_sizes:
                    s_kx = min_size / self._input_image_size[0]
                    s_ky = min_size / self._input_image_size[1]
                    dense_cx = [x * steps[idx] / self._input_image_size[0] for x in [j + 0.5]]
                    dense_cy = [y * steps[idx] / self._input_image_size[1] for y in [i + 0.5]]
                    for cy, cx in product(dense_cy, dense_cx):
                        anchors += [cx, cy, s_kx, s_ky]
        priors = np.array(anchors).reshape((-1, 4))
        return priors

    def _get_proposals(self, raw_boxes, priors):
        """Decode raw box deltas and scale them to network-input pixels."""
        proposals = self.decode_boxes(raw_boxes, priors, self.variance)
        # Even columns are x-like, odd columns y-like.
        proposals[:, ::2] = proposals[:, ::2] * self._input_image_size[0]
        proposals[:, 1::2] = proposals[:, 1::2] * self._input_image_size[1]
        return proposals

    @staticmethod
    def decode_boxes(raw_boxes, priors, variance):
        """SSD-style decoding: deltas + priors -> corner-format boxes."""
        boxes = np.concatenate((
            priors[:, :2] + raw_boxes[:, :2] * variance[0] * priors[:, 2:],
            priors[:, 2:] * np.exp(raw_boxes[:, 2:] * variance[1])), 1)
        # Convert (cx, cy, w, h) to (x_min, y_min, x_max, y_max).
        boxes[:, :2] -= boxes[:, 2:] / 2
        boxes[:, 2:] += boxes[:, :2]
        return boxes

    @property
    def scale_x(self) -> float:
        # Ratio of original to network-input size along the x-like axis.
        return self._origin_image_size[0] / self._input_image_size[1]

    @property
    def scale_y(self) -> float:
        # Ratio of original to network-input size along the y-like axis.
        return self._origin_image_size[1] / self._input_image_size[0]
from __future__ import print_function
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import time
import cv2
from retinaface.data import cfg_mnet, cfg_re50
from retinaface.layers.functions.prior_box import PriorBox
from retinaface.utils.nms.py_cpu_nms import py_cpu_nms
from retinaface.models.retinaface import RetinaFace
from retinaface.utils.box_utils import decode, decode_landm
def check_keys(model, pretrained_state_dict):
    """Assert that the checkpoint shares at least one parameter name with *model*.

    :param model: the torch module about to receive the weights.
    :param pretrained_state_dict: state dict loaded from a checkpoint.
    :returns: True when at least one checkpoint key matches a model key.
    :raises AssertionError: when no key overlaps at all.
    """
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    used_pretrained_keys = model_keys & ckpt_keys
    # The original also computed the unused/missing key sets but never used
    # them; only the overlap matters for this sanity check.
    assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    """Strip a shared leading *prefix* (e.g. 'module.') from every key.

    Keys without the prefix are passed through unchanged.  Old-style models
    saved via DataParallel carry a common 'module.' prefix on all names.
    """
    def strip(key):
        if key.startswith(prefix):
            return key.split(prefix, 1)[-1]
        return key

    return {strip(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu, url_file_name=None):
    """Load weights into *model* from a local checkpoint or a URL and return it.

    The checkpoint may be a raw state dict or wrap one under a 'state_dict'
    key; a leading 'module.' (DataParallel training artifact) is stripped
    from every key before loading.

    :param model: torch module to receive the weights.
    :param pretrained_path: filesystem path or http(s) URL of the checkpoint.
    :param load_to_cpu: when True, map all tensors to CPU; otherwise to the
        current CUDA device.
    :param url_file_name: cache file name passed to torch.hub for URL loads.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    # Accept both http and https checkpoint URLs (original only matched https).
    url_flag = pretrained_path.startswith(('http://', 'https://'))
    if load_to_cpu:
        # Keep tensors on CPU regardless of where they were saved.
        map_location = lambda storage, loc: storage
    else:
        # Remap every tensor onto the current CUDA device.
        device = torch.cuda.current_device()
        map_location = lambda storage, loc: storage.cuda(device)
    if url_flag:
        pretrained_dict = torch.hub.load_state_dict_from_url(pretrained_path,
                                                             map_location=map_location,
                                                             file_name=url_file_name)
    else:
        pretrained_dict = torch.load(pretrained_path, map_location=map_location)
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    # strict=False: tolerate keys present in only one of model/checkpoint.
    model.load_state_dict(pretrained_dict, strict=False)
    return model
if __name__ == '__main__':
    # CLI demo: repeatedly runs detection on curve/test.jpg and saves the
    # last annotated result to test.jpg.
    parser = argparse.ArgumentParser(description='Retinaface')
    parser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',
                        type=str, help='Trained state_dict file path to open')
    parser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')
    parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
    parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
    parser.add_argument('--top_k', default=5000, type=int, help='top_k')
    parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
    parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
    parser.add_argument('-s', '--save_image', action="store_true", default=True, help='show detection results')
    parser.add_argument('--vis_thres', default=0.6, type=float, help='visualization_threshold')
    args = parser.parse_args()

    torch.set_grad_enabled(False)  # inference only: no autograd bookkeeping
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # net and model
    net = RetinaFace(cfg=cfg, phase = 'test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    cudnn.benchmark = True  # autotune conv kernels for the fixed input size
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)

    resize = 1  # image is fed at its original resolution

    # testing begin
    # NOTE(review): processes the same image 100 times — looks like a timing
    # loop for benchmarking forward-pass latency.
    for i in range(100):
        image_path = "curve/test.jpg"
        img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
        img = np.float32(img_raw)

        im_height, im_width, _ = img.shape
        # (w, h, w, h) scale to map normalized box coords back to pixels.
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        img -= (104, 117, 123)  # per-channel BGR mean subtraction
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(device)
        scale = scale.to(device)

        tic = time.time()
        loc, conf, landms = net(img)  # forward pass
        print('net forward time: {:.4f}'.format(time.time() - tic))

        # Anchor priors for this input size; rebuilt every iteration.
        priorbox = PriorBox(cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(device)
        prior_data = priors.data
        boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]  # face-class probability
        landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
        # Landmark scale: five (x, y) pairs interleaved as (w, h) x 5.
        scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                               img.shape[3], img.shape[2]])
        scale1 = scale1.to(device)
        landms = landms * scale1 / resize
        landms = landms.cpu().numpy()

        # ignore low scores
        inds = np.where(scores > args.confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]

        # keep top-K before NMS
        order = scores.argsort()[::-1][:args.top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]

        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, args.nms_threshold)
        # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
        dets = dets[keep, :]
        landms = landms[keep]

        # keep top-K faster NMS
        dets = dets[:args.keep_top_k, :]
        landms = landms[:args.keep_top_k, :]

        # Row layout: (x1, y1, x2, y2, score, 5 x (x, y) landmarks).
        dets = np.concatenate((dets, landms), axis=1)

        # show image
        if args.save_image:
            for b in dets:
                if b[4] < args.vis_thres:
                    continue
                text = "{:.4f}".format(b[4])
                b = list(map(int, b))
                cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
                cx = b[0]
                cy = b[1] + 12
                cv2.putText(img_raw, text, (cx, cy),
                            cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))

                # landms
                cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
                cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
                cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
                cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
                cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
            # save image
            name = "test.jpg"
            cv2.imwrite(name, img_raw)
from __future__ import print_function
import argparse
import torch
from data import cfg_mnet, cfg_re50
from models.retinaface import RetinaFace
# CLI for exporting a trained RetinaFace checkpoint to ONNX.
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('-m', '--trained_model', default='./weights/mobilenet0.25_Final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
# type=int added: without it a CLI-supplied value arrived as a str and broke
# the torch.randn(...) dummy-input construction below (the int default hid this).
parser.add_argument('--long_side', default=640, type=int, help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
parser.add_argument('--cpu', action="store_true", default=True, help='Use cpu inference')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
    """Print key-overlap statistics between *model* and a checkpoint.

    Asserts that at least one checkpoint key matches a model key and
    returns True.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    own_keys = set(model.state_dict().keys())
    shared = own_keys & checkpoint_keys
    extra = checkpoint_keys - own_keys
    absent = own_keys - checkpoint_keys
    print('Missing keys:{}'.format(len(absent)))
    print('Unused checkpoint keys:{}'.format(len(extra)))
    print('Used keys:{}'.format(len(shared)))
    assert len(shared) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    """Return *state_dict* with *prefix* stripped from every key that carries it.

    Old-style checkpoints saved via DataParallel prefix all names with
    'module.'; keys without the prefix are kept unchanged.
    """
    print('remove prefix \'{}\''.format(prefix))
    renamed = {}
    for key, value in state_dict.items():
        new_key = key.split(prefix, 1)[-1] if key.startswith(prefix) else key
        renamed[new_key] = value
    return renamed
def load_model(model, pretrained_path, load_to_cpu):
    """Load a local checkpoint into *model* and return it.

    The checkpoint may be a raw state dict or wrap one under a 'state_dict'
    key; a leading 'module.' (DataParallel training artifact) is stripped
    from every key before loading.
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        # Keep tensors on CPU regardless of where they were saved.
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        # Remap every tensor onto the current CUDA device.
        device = torch.cuda.current_device()
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    # strict=False: tolerate keys present in only one of model/checkpoint.
    model.load_state_dict(pretrained_dict, strict=False)
    return model
if __name__ == '__main__':
    torch.set_grad_enabled(False)  # export only; no autograd needed
    cfg = None
    if args.network == "mobile0.25":
        cfg = cfg_mnet
    elif args.network == "resnet50":
        cfg = cfg_re50
    # net and model
    net = RetinaFace(cfg=cfg, phase = 'test')
    net = load_model(net, args.trained_model, args.cpu)
    net.eval()
    print('Finished loading model!')
    print(net)
    device = torch.device("cpu" if args.cpu else "cuda")
    net = net.to(device)

    # ------------------------ export -----------------------------
    output_onnx = 'FaceDetector.onnx'
    print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
    input_names = ["input0"]
    output_names = ["output0"]
    # Square dummy input; the exported graph is traced at this fixed size.
    inputs = torch.randn(1, 3, args.long_side, args.long_side).to(device)
    # NOTE(review): torch.onnx._export is a private API — torch.onnx.export
    # is the public equivalent; confirm before upgrading PyTorch.
    torch_out = torch.onnx._export(net, inputs, output_onnx, export_params=True, verbose=False,
                                   input_names=input_names, output_names=output_names)
import torch
import numpy as np
torch.set_grad_enabled(False)
# My libs
import retinaface.models.retinaface as rf_model
import retinaface.detect as rf_detect
import retinaface.data.config as rf_config
import retinaface.layers.functions.prior_box as rf_priors
import retinaface.utils.box_utils as rf_ubox
import retinaface.utils.nms.py_cpu_nms as rf_nms
# Default configs
# Post-treatment defaults used when the caller passes no override.
cfg_postreat_dft = {'resize': 1.,        # divisor applied to decoded coordinates
                    'score_thr': 0.75,   # minimum face confidence kept
                    'top_k': 5000,       # candidates kept before NMS
                    'nms_thr': 0.4,      # NMS IoU threshold
                    'keep_top_k': 50}    # detections returned after NMS
class RetinaFaceDetector:
    """High-level RetinaFace wrapper: loads pretrained weights and detects faces."""

    def __init__(self,
                 model='mobile0.25',
                 device='cuda',
                 extra_features=['landmarks'],
                 cfg_postreat=cfg_postreat_dft):
        """Build the chosen backbone, download its weights and move it to *device*.

        :param model: backbone variant, 'mobile0.25' or 'resnet50'.
        :param device: torch device string, e.g. 'cuda' or 'cpu'.
        :param extra_features: outputs beyond 'bbox'; only 'landmarks' is handled.
        :param cfg_postreat: post-treatment thresholds (see cfg_postreat_dft).
        """
        # NOTE(review): extra_features/cfg_postreat are mutable defaults, and
        # self.cfg below aliases a module-level config dict that is mutated
        # (a 'postreat' key is added) — state is shared across detector
        # instances; confirm this is intentional.
        # Set model configuration
        cfg = None
        trained_model = None
        if model == "mobile0.25":
            cfg = rf_config.cfg_mnet
            # Pretrained checkpoints hosted on Google Drive.
            trained_model = "https://drive.google.com/uc?export=download&confirm=yes&id=1nxhtpdVLbmheUTwyIb733MrL53X4SQgQ"
            url_model_name = "retinaface_mobile025.pth"
        elif model == "resnet50":
            cfg = rf_config.cfg_re50
            trained_model = "https://drive.google.com/uc?export=download&confirm=yes&id=1a9SqFRkeTuJUwqerElCWJFrotZuDGVtT"
            url_model_name = "retinaface_resnet50.pth"
        else:
            raise ValueError('Model configuration not found')

        # Load net and model
        cpu_flag = 'cpu' in device
        net = rf_model.RetinaFace(cfg=cfg, phase='test')
        net = rf_detect.load_model(net, trained_model, cpu_flag, url_file_name=url_model_name)
        net.eval()
        print('RetinaFace loaded!')

        # Define detector variables
        self.device = torch.device(device)
        self.net = net.to(self.device)
        self.cfg = cfg
        self.features = ['bbox'] + extra_features
        self.scale = {}           # per-feature tensors mapping net output to pixels
        self.prior_data = None    # anchor priors; built in set_input_shape()

        # Postreatment configuration
        self.cfg['postreat'] = cfg_postreat

    def set_input_shape(self, im_height, im_width):
        """Precompute coordinate scales and anchor priors for a fixed input size.

        Must be called before inference() and whenever the image size changes.
        """
        # Scales
        scale_bbox = torch.Tensor([im_width, im_height, im_width, im_height])
        self.scale['bbox'] = scale_bbox.to(self.device)
        if 'landmarks' in self.features:
            # Five (x, y) landmark pairs.
            scale_lnd = torch.Tensor([im_width, im_height, im_width, im_height,
                                      im_width, im_height, im_width, im_height,
                                      im_width, im_height])
            self.scale['landmarks'] = scale_lnd.to(self.device)

        # Load priors
        priorbox = rf_priors.PriorBox(self.cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(self.device)
        self.prior_data = priors.data

    def inference(self, image):
        """Run the full pipeline on one BGR image; returns a features dict."""
        img = self._pretreatment(image)
        loc, conf, lnd = self._net_forward(img)
        features = self._postreatment(loc, conf, lnd)
        return features

    def _pretreatment(self, img_raw):
        """Mean-subtract, HWC->CHW, add a batch dim and move to the device."""
        img = np.float32(img_raw)
        img -= (104, 117, 123)  # per-channel BGR means
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(self.device)
        return img

    def _net_forward(self, img):
        """Forward pass; returns (box deltas, class scores, landmark deltas)."""
        loc, conf, landms = self.net(img)
        return loc, conf, landms

    def _postreatment(self, loc, conf, landms):
        """Decode, threshold and NMS the raw network outputs."""
        cfg_post = self.cfg['postreat']
        boxes = rf_ubox.decode(loc.data.squeeze(0), self.prior_data, self.cfg['variance'])
        boxes = boxes * self.scale['bbox'] / cfg_post['resize']
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]  # face-class probability
        landms = rf_ubox.decode_landm(landms.data.squeeze(0), self.prior_data, self.cfg['variance'])
        # NOTE(review): self.scale['landmarks'] is only populated when
        # 'landmarks' is in self.features — this line would raise KeyError for
        # a bbox-only detector; confirm.
        landms = landms * self.scale['landmarks'] / cfg_post['resize']
        landms = landms.cpu().numpy()

        # Ignore low scores
        inds = np.where(scores > cfg_post['score_thr'])[0]
        boxes = boxes[inds]
        scores = scores[inds]

        # Keep top-K before NMS
        order = scores.argsort()[::-1][:cfg_post['top_k']]
        boxes = boxes[order]
        scores = scores[order]

        # NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = rf_nms.py_cpu_nms(dets, cfg_post['nms_thr'])
        dets = dets[keep, :]

        # keep top-K faster NMS
        dets = dets[:cfg_post['keep_top_k'], :]
        features = {'bbox': dets}
        if 'landmarks' in self.features:
            # Mirror the same filter/order/keep chain on the landmarks.
            landms = landms[inds]
            landms = landms[order]
            landms = landms[keep]
            landms = landms[:cfg_post['keep_top_k'], :]
            landms = np.array(landms)
            landms = np.expand_dims(landms, axis=-1)
            landms = landms.reshape((-1, 5, 2))  # five (x, y) points per face
            features['landmarks'] = landms
        return features
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv_bn(inp, oup, stride = 1, leaky = 0):
    """3x3 conv (pad 1, no bias) -> BatchNorm -> LeakyReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_bn_no_relu(inp, oup, stride):
    """3x3 conv (pad 1, no bias) followed by BatchNorm, without activation."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
    ]
    return nn.Sequential(*layers)
def conv_bn1X1(inp, oup, stride, leaky=0):
    """Pointwise (1x1, no padding, no bias) conv -> BatchNorm -> LeakyReLU."""
    layers = [
        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_dw(inp, oup, stride, leaky=0.1):
    """Depthwise-separable block.

    Depthwise 3x3 conv -> BN -> LeakyReLU, then pointwise 1x1 conv -> BN ->
    LeakyReLU; neither conv carries a bias.
    """
    depthwise = [
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    pointwise = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*depthwise, *pointwise)
class SSH(nn.Module):
    """Single Stage Headless (SSH) context module.

    Three parallel branches with effective receptive fields of 3x3, 5x5
    (two stacked 3x3 convs) and 7x7 (three stacked 3x3 convs) are
    concatenated channel-wise and passed through ReLU.
    """

    def __init__(self, in_channel, out_channel):
        super(SSH, self).__init__()
        # Output channels split 1/2 + 1/4 + 1/4 across the three branches.
        assert out_channel % 4 == 0
        leaky = 0
        if (out_channel <= 64):
            leaky = 0.1  # narrow modules use a leaky activation inside conv_bn
        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel//2, stride=1)

        self.conv5X5_1 = conv_bn(in_channel, out_channel//4, stride=1, leaky = leaky)
        self.conv5X5_2 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)

        self.conv7X7_2 = conv_bn(out_channel//4, out_channel//4, stride=1, leaky = leaky)
        self.conv7x7_3 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)

    def forward(self, input):
        """Return the ReLU'd channel-wise concat of the three context branches."""
        conv3X3 = self.conv3X3(input)

        conv5X5_1 = self.conv5X5_1(input)
        conv5X5 = self.conv5X5_2(conv5X5_1)

        # The 7x7 branch reuses the first conv of the 5x5 branch.
        conv7X7_2 = self.conv7X7_2(conv5X5_1)
        conv7X7 = self.conv7x7_3(conv7X7_2)

        out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
        out = F.relu(out)
        return out
class FPN(nn.Module):
    """Feature Pyramid Network over three backbone stages.

    Each input level is projected to ``out_channels`` with a 1x1 conv;
    coarser levels are upsampled (nearest) and added into the level below,
    which is then smoothed with a 3x3 conv.
    """

    def __init__(self,in_channels_list,out_channels):
        super(FPN,self).__init__()
        leaky = 0
        if (out_channels <= 64):
            leaky = 0.1  # narrow pyramids get a leaky activation
        # 1x1 lateral projections for the three tapped stages.
        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride = 1, leaky = leaky)
        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride = 1, leaky = leaky)
        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride = 1, leaky = leaky)

        # 3x3 smoothing convs applied after the top-down merges.
        self.merge1 = conv_bn(out_channels, out_channels, leaky = leaky)
        self.merge2 = conv_bn(out_channels, out_channels, leaky = leaky)

    def forward(self, input):
        """*input*: ordered mapping of the three backbone features, finest first."""
        # names = list(input.keys())
        input = list(input.values())

        output1 = self.output1(input[0])
        output2 = self.output2(input[1])
        output3 = self.output3(input[2])

        # Top-down pathway: upsample the coarser level, add, then smooth.
        up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest")
        output2 = output2 + up3
        output2 = self.merge2(output2)

        up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest")
        output1 = output1 + up2
        output1 = self.merge1(output1)

        out = [output1, output2, output3]
        return out
class MobileNetV1(nn.Module):
    """Slim MobileNetV1-style backbone built from depthwise-separable blocks.

    Three stages whose outputs are tapped externally (via an
    IntermediateLayerGetter); the avg-pool/fc tail is only exercised when
    the backbone is run stand-alone as a classifier.
    """

    def __init__(self):
        super(MobileNetV1, self).__init__()
        self.stage1 = nn.Sequential(
            conv_bn(3, 8, 2, leaky = 0.1),    # 3
            conv_dw(8, 16, 1),   # 7
            conv_dw(16, 32, 2),  # 11
            conv_dw(32, 32, 1),  # 19
            conv_dw(32, 64, 2),  # 27
            conv_dw(64, 64, 1),  # 43
        )
        self.stage2 = nn.Sequential(
            conv_dw(64, 128, 2),  # 43 + 16 = 59
            conv_dw(128, 128, 1), # 59 + 32 = 91
            conv_dw(128, 128, 1), # 91 + 32 = 123
            conv_dw(128, 128, 1), # 123 + 32 = 155
            conv_dw(128, 128, 1), # 155 + 32 = 187
            conv_dw(128, 128, 1), # 187 + 32 = 219
        )
        self.stage3 = nn.Sequential(
            conv_dw(128, 256, 2), # 219 +3 2 = 241
            conv_dw(256, 256, 1), # 241 + 64 = 301
        )
        # Classifier tail (stand-alone use only).
        self.avg = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(256, 1000)

    def forward(self, x):
        """Classifier forward: stages -> global average pool -> 1000-way fc."""
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        # x = self.model(x)
        x = x.view(-1, 256)
        x = self.fc(x)
        return x
import torch
import torch.nn as nn
import torchvision.models._utils as _utils
import torch.nn.functional as F
from retinaface.models.net import MobileNetV1 as MobileNetV1
from retinaface.models.net import FPN as FPN
from retinaface.models.net import SSH as SSH
class ClassHead(nn.Module):
    """Predicts per-anchor face/background scores from one pyramid level."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        scores = self.conv1x1(x)
        # (N, A*2, H, W) -> (N, H, W, A*2) -> (N, H*W*A, 2)
        scores = scores.permute(0, 2, 3, 1).contiguous()
        return scores.view(scores.shape[0], -1, 2)
class BboxHead(nn.Module):
    """Predicts per-anchor box regression offsets (4 values) from one pyramid level."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        deltas = self.conv1x1(x)
        # (N, A*4, H, W) -> (N, H, W, A*4) -> (N, H*W*A, 4)
        deltas = deltas.permute(0, 2, 3, 1).contiguous()
        return deltas.view(deltas.shape[0], -1, 4)
class LandmarkHead(nn.Module):
    """Predicts per-anchor facial-landmark offsets (5 points x 2 coords)."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        offsets = self.conv1x1(x)
        # (N, A*10, H, W) -> (N, H, W, A*10) -> (N, H*W*A, 10)
        offsets = offsets.permute(0, 2, 3, 1).contiguous()
        return offsets.view(offsets.shape[0], -1, 10)
class RetinaFace(nn.Module):
    """RetinaFace detector: backbone -> FPN -> SSH -> per-level prediction heads."""

    def __init__(self, cfg = None, phase = 'train'):
        """
        :param cfg: Network related settings (backbone name, channels, layers).
        :param phase: 'train' (raw class logits) or 'test' (softmax scores).
        """
        super(RetinaFace,self).__init__()
        self.phase = phase
        backbone = None
        if cfg['name'] == 'mobilenet0.25':
            backbone = MobileNetV1()
        elif cfg['name'] == 'Resnet50':
            import torchvision.models as models
            backbone = models.resnet50(pretrained=cfg['pretrain'])

        # Tap the intermediate backbone stages named in cfg['return_layers'].
        self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])
        in_channels_stage2 = cfg['in_channel']
        # Channel widths of the three tapped stages (2x, 4x, 8x the base width).
        in_channels_list = [
            in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8,
        ]
        out_channels = cfg['out_channel']
        self.fpn = FPN(in_channels_list,out_channels)
        # One SSH context module per pyramid level.
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)

        # Per-level heads for classification, box regression and landmarks.
        self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])

    def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):
        """Build one ClassHead per pyramid level."""
        classhead = nn.ModuleList()
        for i in range(fpn_num):
            classhead.append(ClassHead(inchannels,anchor_num))
        return classhead

    def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):
        """Build one BboxHead per pyramid level."""
        bboxhead = nn.ModuleList()
        for i in range(fpn_num):
            bboxhead.append(BboxHead(inchannels,anchor_num))
        return bboxhead

    def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):
        """Build one LandmarkHead per pyramid level."""
        landmarkhead = nn.ModuleList()
        for i in range(fpn_num):
            landmarkhead.append(LandmarkHead(inchannels,anchor_num))
        return landmarkhead

    def forward(self,inputs):
        """Return (bbox_regressions, classifications, ldm_regressions).

        Classification scores are raw logits in 'train' phase and softmax
        probabilities in any other phase.
        """
        out = self.body(inputs)

        # FPN
        fpn = self.fpn(out)

        # SSH
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]

        # Concatenate per-level head outputs along the anchor dimension.
        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)
        ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)

        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
        return output
import torch
import torch.nn as nn
import torch.nn.functional as F
from retinaface.utils.box_utils import match, log_sum_exp
from retinaface.data import cfg_mnet
GPU = cfg_mnet['gpu_train']
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
    def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
        """Store the matching and hard-negative-mining hyper-parameters.

        See the class docstring for the overall loss definition; these values
        are consumed by forward().
        """
        super(MultiBoxLoss, self).__init__()
        self.num_classes = num_classes               # classes incl. background
        self.threshold = overlap_thresh              # IoU above which a prior matches a gt box
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos                  # negatives kept per positive
        self.neg_overlap = neg_overlap
        self.variance = [0.1, 0.2]                   # SSD box-encoding variances
def forward(self, predictions, priors, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
ground_truth (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, landm_data = predictions
priors = priors
num = loc_data.size(0)
num_priors = (priors.size(0))
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
landm_t = torch.Tensor(num, num_priors, 10)
conf_t = torch.LongTensor(num, num_priors)
for idx in range(num):
truths = targets[idx][:, :4].data
labels = targets[idx][:, -1].data
landms = targets[idx][:, 4:14].data
defaults = priors.data
match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx)
if GPU:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
landm_t = landm_t.cuda()
zeros = torch.tensor(0).cuda()
# landm Loss (Smooth L1)
# Shape: [batch,num_priors,10]
pos1 = conf_t > zeros
num_pos_landm = pos1.long().sum(1, keepdim=True)
N1 = max(num_pos_landm.data.sum().float(), 1)
pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data)
landm_p = landm_data[pos_idx1].view(-1, 10)
landm_t = landm_t[pos_idx1].view(-1, 10)
loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum')
pos = conf_t != zeros
conf_t[pos] = 1
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now
loss_c = loss_c.view(num, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes)
targets_weighted = conf_t[(pos+neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
N = max(num_pos.data.sum().float(), 1)
loss_l /= N
loss_c /= N
loss_landm /= N1
return loss_l, loss_c, loss_landm | /retinaface_py-0.0.2-py3-none-any.whl/retinaface/layers/modules/multibox_loss.py | 0.923648 | 0.691706 | multibox_loss.py | pypi |
import torch
import torch.utils.data as data
import cv2
import numpy as np
class WiderFaceDetection(data.Dataset):
    """WIDER FACE detection dataset.

    Parses the WIDER FACE ``label.txt`` format: lines starting with ``#``
    name an image (path relative to the sibling ``images/`` directory) and
    the numeric lines that follow are one face annotation each.

    Args:
        txt_path: path to the ``label.txt`` annotation file.
        preproc: optional callable ``(image, target) -> (image, target)``
            applied at the end of ``__getitem__``.
    """

    def __init__(self, txt_path, preproc=None):
        self.preproc = preproc
        self.imgs_path = []
        self.words = []
        labels = []
        is_first = True
        # BUG FIX: the file handle used to be opened without ever being
        # closed (resource leak); use a context manager instead.
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        for line in lines:
            line = line.rstrip()
            if line.startswith('#'):
                if is_first:
                    is_first = False
                else:
                    # flush the annotations collected for the previous image
                    self.words.append(labels.copy())
                    labels.clear()
                path = line[2:]
                path = txt_path.replace('label.txt', 'images/') + path
                self.imgs_path.append(path)
            else:
                labels.append([float(x) for x in line.split(' ')])
        # annotations for the last image in the file
        self.words.append(labels)

    def __len__(self):
        return len(self.imgs_path)

    def __getitem__(self, index):
        img = cv2.imread(self.imgs_path[index])
        height, width, _ = img.shape
        labels = self.words[index]
        annotations = np.zeros((0, 15))
        if len(labels) == 0:
            return annotations
        for label in labels:
            annotation = np.zeros((1, 15))
            # bbox stored as (x, y, w, h); convert to corner coordinates
            annotation[0, 0] = label[0]  # x1
            annotation[0, 1] = label[1]  # y1
            annotation[0, 2] = label[0] + label[2]  # x2
            annotation[0, 3] = label[1] + label[3]  # y2
            # landmarks: every third raw value is skipped
            annotation[0, 4] = label[4]  # l0_x
            annotation[0, 5] = label[5]  # l0_y
            annotation[0, 6] = label[7]  # l1_x
            annotation[0, 7] = label[8]  # l1_y
            annotation[0, 8] = label[10]  # l2_x
            annotation[0, 9] = label[11]  # l2_y
            annotation[0, 10] = label[13]  # l3_x
            annotation[0, 11] = label[14]  # l3_y
            annotation[0, 12] = label[16]  # l4_x
            annotation[0, 13] = label[17]  # l4_y
            # trailing flag: -1 when landmarks are absent (negative sentinel)
            if annotation[0, 4] < 0:
                annotation[0, 14] = -1
            else:
                annotation[0, 14] = 1
            annotations = np.append(annotations, annotation, axis=0)
        target = np.array(annotations)
        if self.preproc is not None:
            img, target = self.preproc(img, target)
        return torch.from_numpy(img), target
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    imgs = [item for sample in batch for item in sample if torch.is_tensor(item)]
    targets = [
        torch.from_numpy(item).float()
        for sample in batch
        for item in sample
        if isinstance(item, np.ndarray)
    ]
    return (torch.stack(imgs, 0), targets)
return (torch.stack(imgs, 0), targets) | /retinaface_py-0.0.2-py3-none-any.whl/retinaface/data/wider_face.py | 0.637482 | 0.325346 | wider_face.py | pypi |
# Retinaface
[](https://zenodo.org/badge/latestdoi/280950959)

This repo is build on top of [https://github.com/biubug6/Pytorch_Retinaface](https://github.com/biubug6/Pytorch_Retinaface)
## Differences
### Train loop moved to [Pytorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning)
It adds a set of functionality:
* Distributed training
* fp16
* Synchronized BatchNorm
* Support for various loggers like [W&B](https://www.wandb.com/) or [Neptune.ml](https://neptune.ai/)
### Hyperparameters are defined in the config file
Hyperparameters that were scattered across the code moved to the config at [retinadace/config](retinadace/config)
### Augmentations => [Albumentations](https://albumentations.ai/)
Color transforms that were manually implemented are replaced by the Albumentations library.
Todo:
* Horizontal Flip is not implemented in Albumentations
* Spatial transforms like rotations or transpose are not implemented yet.
Color transforms defined in the config.
### Added mAP calculation for validation
In order to track the progress, mAP metric is calculated on validation.
## Installation
`pip install -U retinaface_pytorch`
## Example inference
```python
import cv2
from retinaface.pre_trained_models import get_model
```
`image = <numpy array with shape (height, width, 3)>`
```python
model = get_model("resnet50_2020-07-20", max_size=2048)
model.eval()
annotation = model.predict_jsons(image)
```
* Jupyter notebook with the example: [](https://colab.research.google.com/drive/1wLXZyoybDRKizfcIzxPwkeYp-XDpTM-K?usp=sharing)
* Jupyter notebook with the example on how to combine face detector with mask detector: [](https://colab.research.google.com/drive/13Ktsrx164eQHfDmYLyMCoI-Kq0gC5Kg1?usp=sharing)
## Data Preparation
The pipeline expects labels in the format:
```
[
{
"file_name": "0--Parade/0_Parade_marchingband_1_849.jpg",
"annotations": [
{
"bbox": [
449,
330,
571,
720
],
"landmarks": [
[
488.906,
373.643
],
[
542.089,
376.442
],
[
515.031,
412.83
],
[
485.174,
425.893
],
[
538.357,
431.491
]
]
}
]
},
```
You can convert the default labels of the WiderFaces to the json of the proper format with this [script](https://github.com/ternaus/iglovikov_helper_functions/blob/master/iglovikov_helper_functions/data_processing/wider_face/prepare_data.py).
## Training
### Install dependencies
```
pip install -r requirements.txt
pip install -r requirements_dev.txt
```
### Define config
Example configs could be found at [retinaface/configs](retinaface/configs)
### Define environmental variables
```bash
export TRAIN_IMAGE_PATH=<path to train images>
export VAL_IMAGE_PATH=<path to validation images>
export TRAIN_LABEL_PATH=<path to train annotations>
export VAL_LABEL_PATH=<path to validation annotations>
```
### Run training script
```
python retinaface/train.py -h
usage: train.py [-h] -c CONFIG_PATH
optional arguments:
-h, --help show this help message and exit
-c CONFIG_PATH, --config_path CONFIG_PATH
Path to the config.
```
## Inference
```
python retinaface/inference.py -h
usage: inference.py [-h] -i INPUT_PATH -c CONFIG_PATH -o OUTPUT_PATH [-v]
[-g NUM_GPUS] [-m MAX_SIZE] [-b BATCH_SIZE]
[-j NUM_WORKERS]
[--confidence_threshold CONFIDENCE_THRESHOLD]
[--nms_threshold NMS_THRESHOLD] -w WEIGHT_PATH
[--keep_top_k KEEP_TOP_K] [--world_size WORLD_SIZE]
[--local_rank LOCAL_RANK] [--fp16]
optional arguments:
-h, --help show this help message and exit
-i INPUT_PATH, --input_path INPUT_PATH
Path with images.
-c CONFIG_PATH, --config_path CONFIG_PATH
Path to config.
-o OUTPUT_PATH, --output_path OUTPUT_PATH
Path to save jsons.
-v, --visualize Visualize predictions
-g NUM_GPUS, --num_gpus NUM_GPUS
The number of GPUs to use.
-m MAX_SIZE, --max_size MAX_SIZE
Resize the largest side to this number
-b BATCH_SIZE, --batch_size BATCH_SIZE
batch_size
-j NUM_WORKERS, --num_workers NUM_WORKERS
num_workers
--confidence_threshold CONFIDENCE_THRESHOLD
confidence_threshold
--nms_threshold NMS_THRESHOLD
nms_threshold
-w WEIGHT_PATH, --weight_path WEIGHT_PATH
Path to weights.
--keep_top_k KEEP_TOP_K
keep_top_k
--world_size WORLD_SIZE
number of nodes for distributed training
--local_rank LOCAL_RANK
node rank for distributed training
  --fp16                Use fp16
```
```
python -m torch.distributed.launch --nproc_per_node=<num_gpus> retinaface/inference.py <parameters>
```
* [Weights](https://drive.google.com/drive/folders/1DuiwlTd1BbZ0ZzafrV7qMncko1Z5a412?usp=sharing) for the model with [config](retinaface/configs/2020-07-19.yaml).
* [Weights](https://drive.google.com/file/d/1slNNW1bntYqDKpvi2r1QfcQAwnhsVw9n/view?usp=sharing) for the model with [config](retinaface/configs/2020-07-20.yaml).
# Web app
https://retinaface.herokuapp.com/
Code for the web app: https://github.com/ternaus/retinaface_demo
### Converting to ONNX
The inference could be sped up on CPU by converting the model to ONNX.
```
Ex: python -m converters.to_onnx -m 1280 -o retinaface1280.onnx
```
| /retinaface_pytorch-0.0.8.tar.gz/retinaface_pytorch-0.0.8/README.md | 0.481454 | 0.886764 | README.md | pypi |
import argparse
import os
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import yaml
from addict import Dict as Adict
from albumentations.core.serialization import from_dict
from iglovikov_helper_functions.config_parsing.utils import object_from_dict
from iglovikov_helper_functions.metrics.map import recall_precision
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.loggers import WandbLogger
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from torchvision.ops import nms
from retinaface.box_utils import decode
from retinaface.data_augment import Preproc
from retinaface.dataset import FaceDetectionDataset, detection_collate
TRAIN_IMAGE_PATH = Path(os.environ["TRAIN_IMAGE_PATH"])
VAL_IMAGE_PATH = Path(os.environ["VAL_IMAGE_PATH"])
TRAIN_LABEL_PATH = Path(os.environ["TRAIN_LABEL_PATH"])
VAL_LABEL_PATH = Path(os.environ["VAL_LABEL_PATH"])
def get_args() -> Any:
    """Parse command-line arguments (only the required config path)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config_path", type=Path, help="Path to the config.", required=True)
    return parser.parse_args()
class RetinaFace(pl.LightningModule): # pylint: disable=R0901
    """Pytorch-Lightning module wrapping the RetinaFace detector.

    Builds the model, the prior (anchor) boxes and the multi-part loss from
    a config dict, and wires up the train/validation loops. Validation
    computes average precision at IoU 0.5 over COCO-style records.
    """

    def __init__(self, config: Adict[str, Any]) -> None:
        super().__init__()
        self.config = config
        # anchor generator sized to the configured input resolution
        self.prior_box = object_from_dict(self.config.prior_box, image_size=self.config.image_size)
        self.model = object_from_dict(self.config.model)
        self.loss_weights = self.config.loss_weights
        self.loss = object_from_dict(self.config.loss, priors=self.prior_box)

    def setup(self, stage=0) -> None: # type: ignore
        """Create the preprocessor shared by both dataloaders."""
        self.preproc = Preproc(img_dim=self.config.image_size[0])

    def forward(self, batch: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # type: ignore
        """Return (locations, confidences, landmarks) predictions."""
        return self.model(batch)

    def train_dataloader(self) -> DataLoader:
        """Training dataloader; augmentations come from ``config.train_aug``."""
        result = DataLoader(
            FaceDetectionDataset(
                label_path=TRAIN_LABEL_PATH,
                image_path=TRAIN_IMAGE_PATH,
                transform=from_dict(self.config.train_aug),
                preproc=self.preproc,
                rotate90=self.config.train_parameters.rotate90,
            ),
            batch_size=self.config.train_parameters.batch_size,
            num_workers=self.config.num_workers,
            shuffle=True,
            pin_memory=True,
            drop_last=False,
            collate_fn=detection_collate,
        )
        return result

    def val_dataloader(self) -> DataLoader:
        """Validation dataloader.

        NOTE(review): ``drop_last=True`` discards a final partial batch, so a
        few validation samples may never be scored — confirm this is intended.
        """
        result = DataLoader(
            FaceDetectionDataset(
                label_path=VAL_LABEL_PATH,
                image_path=VAL_IMAGE_PATH,
                transform=from_dict(self.config.val_aug),
                preproc=self.preproc,
                rotate90=self.config.val_parameters.rotate90,
            ),
            batch_size=self.config.val_parameters.batch_size,
            num_workers=self.config.num_workers,
            shuffle=False,
            pin_memory=True,
            drop_last=True,
            collate_fn=detection_collate,
        )
        return result

    def configure_optimizers(
        self,
    ) -> Tuple[Callable[[bool], Union[Optimizer, List[Optimizer], List[LightningOptimizer]]], List[Any]]:
        """Build optimizer and LR scheduler from the config."""
        optimizer = object_from_dict(
            self.config.optimizer, params=[x for x in self.model.parameters() if x.requires_grad]
        )
        scheduler = object_from_dict(self.config.scheduler, optimizer=optimizer)
        # NOTE(review): this shadows LightningModule.optimizers(); it is kept
        # because _get_current_lr reads self.optimizers[0] — confirm against
        # the pinned pytorch-lightning version before changing.
        self.optimizers = [optimizer] # type: ignore
        return self.optimizers, [scheduler] # type: ignore

    def training_step(self, batch: Dict[str, torch.Tensor], batch_idx: int): # type: ignore
        """One optimization step: weighted sum of box/class/landmark losses."""
        images = batch["image"]
        targets = batch["annotation"]
        out = self.forward(images)
        loss_localization, loss_classification, loss_landmarks = self.loss(out, targets)
        total_loss = (
            self.loss_weights["localization"] * loss_localization
            + self.loss_weights["classification"] * loss_classification
            + self.loss_weights["landmarks"] * loss_landmarks
        )
        self.log("train_classification", loss_classification, on_step=True, on_epoch=True, logger=True, prog_bar=True)
        self.log("train_localization", loss_localization, on_step=True, on_epoch=True, logger=True, prog_bar=True)
        self.log("train_landmarks", loss_landmarks, on_step=True, on_epoch=True, logger=True, prog_bar=True)
        self.log("train_loss", total_loss, on_step=True, on_epoch=True, logger=True, prog_bar=True)
        self.log("lr", self._get_current_lr(), on_step=True, on_epoch=True, logger=True, prog_bar=True)
        return total_loss

    def validation_step(self, batch: Dict[str, torch.Tensor], batch_idx: int): # type: ignore
        """Decode predictions, run NMS and emit COCO-style prediction/gt dicts."""
        images = batch["image"]
        image_height = images.shape[2]
        image_width = images.shape[3]
        annotations = batch["annotation"]
        file_names = batch["file_name"]
        out = self.forward(images)
        location, confidence, _ = out
        confidence = F.softmax(confidence, dim=-1)
        batch_size = location.shape[0]
        predictions_coco: List[Dict[str, Any]] = []
        # maps normalized [0, 1] box coordinates to pixel coordinates
        scale = torch.from_numpy(np.tile([image_width, image_height], 2)).to(location.device)
        for batch_id in range(batch_size):
            # decode anchor offsets into corner-format boxes
            boxes = decode(
                location.data[batch_id], self.prior_box.to(images.device), self.config.test_parameters.variance
            )
            scores = confidence[batch_id][:, 1]
            # drop low-confidence detections before NMS
            valid_index = torch.where(scores > 0.1)[0]
            boxes = boxes[valid_index]
            scores = scores[valid_index]
            boxes *= scale
            # do NMS
            keep = nms(boxes, scores, self.config.val_parameters.iou_threshold)
            boxes = boxes[keep, :].cpu().numpy()
            if boxes.shape[0] == 0:
                continue
            scores = scores[keep].cpu().numpy()
            file_name = file_names[batch_id]
            for box_id, bbox in enumerate(boxes):
                x_min, y_min, x_max, y_max = bbox
                # guard against degenerate (inverted/out-of-frame) boxes
                x_min = np.clip(x_min, 0, x_max - 1)
                y_min = np.clip(y_min, 0, y_max - 1)
                predictions_coco += [
                    {
                        "id": str(hash(f"{file_name}_{box_id}")),
                        "image_id": file_name,
                        "category_id": 1,
                        "bbox": [x_min, y_min, x_max - x_min, y_max - y_min],
                        "score": scores[box_id],
                    }
                ]
        gt_coco: List[Dict[str, Any]] = []
        # ground-truth boxes are stored normalized; convert back to pixels
        for batch_id, annotation_list in enumerate(annotations):
            for annotation in annotation_list:
                x_min, y_min, x_max, y_max = annotation[:4]
                file_name = file_names[batch_id]
                gt_coco += [
                    {
                        "id": str(hash(f"{file_name}_{batch_id}")),
                        "image_id": file_name,
                        "category_id": 1,
                        "bbox": [
                            x_min.item() * image_width,
                            y_min.item() * image_height,
                            (x_max - x_min).item() * image_width,
                            (y_max - y_min).item() * image_height,
                        ],
                    }
                ]
        return OrderedDict({"predictions": predictions_coco, "gt": gt_coco})

    def validation_epoch_end(self, outputs: List) -> None:
        """Aggregate per-step records and log average precision (mAP@0.5)."""
        result_predictions: List[dict] = []
        result_gt: List[dict] = []
        for output in outputs:
            result_predictions += output["predictions"]
            result_gt += output["gt"]
        _, _, average_precision = recall_precision(result_gt, result_predictions, 0.5)
        self.log("epoch", self.trainer.current_epoch, on_step=False, on_epoch=True, logger=True) # type: ignore
        # NOTE(review): this logs average precision (higher is better) under
        # the name "val_loss" — confirm any checkpoint/early-stopping monitor
        # uses mode="max".
        self.log("val_loss", average_precision, on_step=False, on_epoch=True, logger=True)

    def _get_current_lr(self) -> torch.Tensor: # type: ignore
        """Return the current LR of the first optimizer as a device tensor."""
        lr = [x["lr"] for x in self.optimizers[0].param_groups][0] # type: ignore
        return torch.from_numpy(np.array([lr]))[0].to(self.device)
def main() -> None:
    """Entry point: load the YAML config, build the pipeline, start training."""
    args = get_args()
    with args.config_path.open() as f:
        config = Adict(yaml.load(f, Loader=yaml.SafeLoader))
    pl.trainer.seed_everything(config.seed)
    pipeline = RetinaFace(config)
    # make sure the checkpoint directory exists before the trainer writes to it
    Path(config.checkpoint_callback.filepath).mkdir(exist_ok=True, parents=True)
    trainer = object_from_dict(
        config.trainer,
        logger=WandbLogger(config.experiment_name),
        checkpoint_callback=object_from_dict(config.checkpoint_callback),
    )
    trainer.fit(pipeline)
if __name__ == "__main__":
main() | /retinaface_pytorch-0.0.8.tar.gz/retinaface_pytorch-0.0.8/retinaface/train.py | 0.897482 | 0.353289 | train.py | pypi |
from typing import Dict, List
import torch
import torch.nn.functional as F
from torch import nn
def conv_bn(inp: int, oup: int, stride: int = 1, leaky: float = 0) -> nn.Sequential:
    """3x3 convolution (padding 1, no bias) -> batch norm -> leaky ReLU."""
    stages = (
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    )
    return nn.Sequential(*stages)
def conv_bn_no_relu(inp: int, oup: int, stride: int) -> nn.Sequential:
    """3x3 convolution (padding 1, no bias) followed by batch norm only."""
    layers = [nn.Conv2d(inp, oup, 3, stride, 1, bias=False)]
    layers.append(nn.BatchNorm2d(oup))
    return nn.Sequential(*layers)
def conv_bn1X1(inp: int, oup: int, stride: int, leaky: float = 0) -> nn.Sequential:
    """1x1 convolution (no padding, no bias) -> batch norm -> leaky ReLU."""
    modules = [
        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*modules)
def conv_dw(inp: int, oup: int, stride: int, leaky: float = 0.1) -> nn.Sequential:
    """Depthwise-separable block: depthwise 3x3 conv + BN + leaky ReLU,
    followed by pointwise 1x1 conv + BN + leaky ReLU."""
    depthwise = [
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    pointwise = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*depthwise, *pointwise)
class SSH(nn.Module):
    """SSH context module: three parallel branches with effective receptive
    fields of 3x3, 5x5 and 7x7 (the larger ones built from stacked 3x3
    convolutions), concatenated and passed through a final ReLU.

    Attribute names are kept as-is: they are part of the state_dict keys.
    """

    def __init__(self, in_channel: int, out_channel: int) -> None:
        super().__init__()
        if out_channel % 4 != 0:
            raise ValueError(f"Expect out channel % 4 == 0, but we got {out_channel % 4}")
        # shallow heads (few channels) get a non-zero LeakyReLU slope
        leaky: float = 0.1 if out_channel <= 64 else 0
        half = out_channel // 2
        quarter = out_channel // 4
        self.conv3X3 = conv_bn_no_relu(in_channel, half, stride=1)
        self.conv5X5_1 = conv_bn(in_channel, quarter, stride=1, leaky=leaky)
        self.conv5X5_2 = conv_bn_no_relu(quarter, quarter, stride=1)
        self.conv7X7_2 = conv_bn(quarter, quarter, stride=1, leaky=leaky)
        self.conv7x7_3 = conv_bn_no_relu(quarter, quarter, stride=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        branch_3 = self.conv3X3(x)
        intermediate = self.conv5X5_1(x)
        branch_5 = self.conv5X5_2(intermediate)
        branch_7 = self.conv7x7_3(self.conv7X7_2(intermediate))
        return F.relu(torch.cat([branch_3, branch_5, branch_7], dim=1))
class FPN(nn.Module):
    """Feature Pyramid Network head: 1x1 lateral projections plus a top-down
    pathway of nearest-neighbour upsampling and 3x3 merge convolutions.

    Attribute names are kept as-is: they are part of the state_dict keys.
    """

    def __init__(self, in_channels_list: List[int], out_channels: int) -> None:
        super().__init__()
        # shallow heads (few channels) get a non-zero LeakyReLU slope
        leaky = 0.1 if out_channels <= 64 else 0.0
        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)
        self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
        self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)

    def forward(self, x: Dict[str, torch.Tensor]) -> List[torch.Tensor]:
        c1, c2, c3 = list(x.values())
        output1 = self.output1(c1)
        output2 = self.output2(c2)
        output3 = self.output3(c3)
        # top-down pathway: upsample the coarser map, add, then merge
        output2 = self.merge2(
            output2 + F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest")
        )
        output1 = self.merge1(
            output1 + F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest")
        )
        return [output1, output2, output3]
return [output1, output2, output3] | /retinaface_pytorch-0.0.8.tar.gz/retinaface_pytorch-0.0.8/retinaface/net.py | 0.952552 | 0.531817 | net.py | pypi |
import random
from typing import Tuple
import numpy as np
from retinaface.box_utils import matrix_iof
def random_crop(
    image: np.ndarray, boxes: np.ndarray, labels: np.ndarray, landm: np.ndarray, img_dim: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, bool]:
    """Take a random square crop that fully contains at least one gt box.

    Tries up to 250 random (scale, position) combinations; if none keeps a
    box, the inputs are returned unchanged with ``pad_image_flag=True`` so
    the caller pads the original image instead.

    Returns:
        (cropped image, boxes, labels, landmarks, pad_image_flag)
    """
    height, width = image.shape[:2]
    pad_image_flag = True
    for _ in range(250):
        # crop side is a random fraction of the image's short side
        pre_scales = [0.3, 0.45, 0.6, 0.8, 1.0]
        scale = random.choice(pre_scales)
        short_side = min(width, height)
        w = int(scale * short_side)
        h = w
        if width == w:
            unclear_variable = 0
        else:
            unclear_variable = random.randrange(width - w)  # left edge of the crop
        if height == h:
            t = 0
        else:
            t = random.randrange(height - h)  # top edge of the crop
        roi = np.array((unclear_variable, t, unclear_variable + w, t + h))
        # accept only crops that fully contain at least one gt box (IoF == 1)
        value = matrix_iof(boxes, roi[np.newaxis])
        flag = value >= 1
        if not flag.any():
            continue
        # keep the boxes whose center falls inside the crop
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
        boxes_t = boxes[mask_a].copy()
        labels_t = labels[mask_a].copy()
        landms_t = landm[mask_a].copy()
        landms_t = landms_t.reshape([-1, 5, 2])
        if boxes_t.shape[0] == 0:
            continue
        image_t = image[roi[1] : roi[3], roi[0] : roi[2]]
        # clamp boxes to the crop and shift into crop-local coordinates
        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
        boxes_t[:, :2] = boxes_t[:, :2] - roi[:2]
        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
        boxes_t[:, 2:] = boxes_t[:, 2:] - roi[:2]
        # landm: shift and clamp landmarks the same way
        landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2]
        landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0]))
        landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2])
        landms_t = landms_t.reshape([-1, 10])
        # make sure that the cropped image contains at least one face > 16 pixel at training image scale
        # NOTE(review): the comment above mentions "> 16 pixel" but the
        # threshold below is 0.0, so only degenerate boxes are dropped — confirm.
        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim
        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim
        mask_b = np.minimum(b_w_t, b_h_t) > 0.0
        boxes_t = boxes_t[mask_b]
        labels_t = labels_t[mask_b]
        landms_t = landms_t[mask_b]
        if boxes_t.shape[0] == 0:
            continue
        pad_image_flag = False
        return image_t, boxes_t, labels_t, landms_t, pad_image_flag
    return image, boxes, labels, landm, pad_image_flag
def random_horizontal_flip(
    image: np.ndarray, boxes: np.ndarray, landms: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Flip image, boxes and 5-point landmarks horizontally with p=0.5."""
    width = image.shape[1]
    if random.randrange(2):
        image = image[:, ::-1]
        # mirror box x-coords: new (x_min, x_max) = width - old (x_max, x_min)
        boxes = boxes.copy()
        boxes[:, 0::2] = width - boxes[:, 2::-2]
        # landm
        landms = landms.copy()
        landms = landms.reshape([-1, 5, 2])
        landms[:, :, 0] = width - landms[:, :, 0]
        # swap landmark points 0<->1 and 3<->4 so left/right pairs stay
        # consistent after mirroring (assumes the 5-point layout is
        # eye, eye, nose, mouth-corner, mouth-corner — TODO confirm)
        tmp = landms[:, 1, :].copy()
        landms[:, 1, :] = landms[:, 0, :]
        landms[:, 0, :] = tmp
        tmp1 = landms[:, 4, :].copy()
        landms[:, 4, :] = landms[:, 3, :]
        landms[:, 3, :] = tmp1
        landms = landms.reshape([-1, 10])
    return image, boxes, landms
def _pad_to_square(image: np.ndarray, pad_image_flag: bool) -> np.ndarray:
if not pad_image_flag:
return image
height, width = image.shape[:2]
long_side = max(width, height)
image_t = np.zeros((long_side, long_side, 3), dtype=image.dtype)
image_t[:height, :width] = image
return image_t
class Preproc:
    """Train-time preprocessing: random square crop, zero padding and random
    horizontal flip, with targets rescaled to normalized [0, 1] coordinates."""

    def __init__(self, img_dim: int) -> None:
        # target (square) training resolution, forwarded to random_crop
        self.img_dim = img_dim

    def __call__(self, image: np.ndarray, targets: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        if targets.shape[0] == 0:
            raise ValueError("this image does not have gt")
        # targets layout: 4 box coords, 10 landmark coords, 1 trailing label
        boxes = targets[:, :4].copy()
        landmarks = targets[:, 4:-1].copy()
        labels = targets[:, -1:].copy()
        image_t, boxes_t, labels_t, landmarks_t, pad_image_flag = random_crop(
            image, boxes, labels, landmarks, self.img_dim
        )
        image_t = _pad_to_square(image_t, pad_image_flag)
        image_t, boxes_t, landmarks_t = random_horizontal_flip(image_t, boxes_t, landmarks_t)
        height, width = image_t.shape[:2]
        # normalize boxes and landmarks to [0, 1] relative coordinates
        boxes_t[:, 0::2] = boxes_t[:, 0::2] / width
        boxes_t[:, 1::2] = boxes_t[:, 1::2] / height
        landmarks_t[:, 0::2] = landmarks_t[:, 0::2] / width
        landmarks_t[:, 1::2] = landmarks_t[:, 1::2] / height
        targets_t = np.hstack((boxes_t, landmarks_t, labels_t))
return image_t, targets_t | /retinaface_pytorch-0.0.8.tar.gz/retinaface_pytorch-0.0.8/retinaface/data_augment.py | 0.687315 | 0.653106 | data_augment.py | pypi |
import json
from pathlib import Path
from typing import Any, Dict, List, Tuple
import albumentations as albu
import numpy as np
import torch
from iglovikov_helper_functions.dl.pytorch.utils import tensor_from_rgb_image
from iglovikov_helper_functions.utils.image_utils import load_rgb
from torch.utils import data
from retinaface.data_augment import Preproc
class FaceDetectionDataset(data.Dataset):
    """Face detection dataset reading COCO-like JSON annotations.

    Each label entry holds a ``file_name`` plus ``annotations`` with a
    ``bbox`` (``x_min, y_min, x_max, y_max``) and optionally five landmark
    points. Entries whose image file is missing on disk are skipped.
    """

    def __init__(
        self,
        label_path: Path,
        image_path: Path,
        transform: albu.Compose,
        preproc: Preproc,
        rotate90: bool = False,
    ) -> None:
        self.preproc = preproc
        self.image_path = Path(image_path)
        self.transform = transform
        self.rotate90 = rotate90
        with label_path.open() as f:
            labels = json.load(f)
        # keep only annotations whose image actually exists on disk
        self.labels = [x for x in labels if (image_path / x["file_name"]).exists()]

    def __len__(self) -> int:
        return len(self.labels)

    def __getitem__(self, index: int) -> Dict[str, Any]:
        labels = self.labels[index]
        file_name = labels["file_name"]
        image = load_rgb(self.image_path / file_name)
        image_height, image_width = image.shape[:2]
        # annotations will have the format
        # 4: box, 10 landmarks, 1: landmarks / no landmarks
        num_annotations = 4 + 10 + 1
        annotations = np.zeros((0, num_annotations))
        for label in labels["annotations"]:
            annotation = np.zeros((1, num_annotations))
            x_min, y_min, x_max, y_max = label["bbox"]
            x_min = np.clip(x_min, 0, image_width - 1)
            y_min = np.clip(y_min, 0, image_height - 1)
            # NOTE(review): x_max is forced to be at least x_min + 1, but
            # y_max is only clipped to y_min, so zero-height boxes are
            # possible — confirm whether that asymmetry is intended.
            x_max = np.clip(x_max, x_min + 1, image_width - 1)
            y_max = np.clip(y_max, y_min, image_height - 1)
            annotation[0, :4] = x_min, y_min, x_max, y_max
            if "landmarks" in label and label["landmarks"]:
                landmarks = np.array(label["landmarks"])
                # landmarks
                annotation[0, 4:14] = landmarks.reshape(-1, 10)
                # trailing flag: -1 marks invalid landmarks, 1 marks valid
                if annotation[0, 4] < 0:
                    annotation[0, 14] = -1
                else:
                    annotation[0, 14] = 1
            annotations = np.append(annotations, annotation, axis=0)
        if self.rotate90:
            image, annotations = random_rotate_90(image, annotations.astype(int))
        image, annotations = self.preproc(image, annotations)
        image = self.transform(image=image)["image"]
        return {
            "image": tensor_from_rgb_image(image),
            "annotation": annotations.astype(np.float32),
            "file_name": file_name,
        }
def random_rotate_90(image: np.ndarray, annotations: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Apply a random 90-degree rotation to the image, boxes and landmarks.

    ``annotations`` is ``(n, 15)``: 4 box coords, 10 landmark coords and a
    trailing validity flag. Invalid landmarks (the ``-1, -1`` markers) are
    temporarily zeroed so albumentations accepts them, then restored.
    """
    image_height, image_width = image.shape[:2]
    boxes = annotations[:, :4]
    keypoints = annotations[:, 4:-1].reshape(-1, 2)
    labels = annotations[:, -1:]
    # landmarks whose (x, y) sum to -2 are the "-1, -1" invalid markers
    invalid_index = keypoints.sum(axis=1) == -2
    keypoints[:, 0] = np.clip(keypoints[:, 0], 0, image_width - 1)
    keypoints[:, 1] = np.clip(keypoints[:, 1], 0, image_height - 1)
    keypoints[invalid_index] = 0
    category_ids = list(range(boxes.shape[0]))
    transform = albu.Compose(
        [albu.RandomRotate90(p=1)],
        keypoint_params=albu.KeypointParams(format="xy"),
        bbox_params=albu.BboxParams(format="pascal_voc", label_fields=["category_ids"]),
    )
    transformed = transform(
        image=image, keypoints=keypoints.tolist(), bboxes=boxes.tolist(), category_ids=category_ids
    )
    keypoints = np.array(transformed["keypoints"])
    # restore the invalid-landmark markers that were zeroed above
    keypoints[invalid_index] = -1
    keypoints = keypoints.reshape(-1, 10)
    boxes = np.array(transformed["bboxes"])
    image = transformed["image"]
    annotations = np.hstack([boxes, keypoints, labels])
    return image, annotations
def detection_collate(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Collate samples whose annotation arrays differ in length.

    Images are stacked into one tensor; annotations stay a list (one float
    tensor per image) because each image has a different number of boxes.
    """
    images = [sample["image"] for sample in batch]
    annotation = [torch.from_numpy(sample["annotation"]).float() for sample in batch]
    file_names = [sample["file_name"] for sample in batch]
    return {"image": torch.stack(images), "annotation": annotation, "file_name": file_names}
return {"image": torch.stack(images), "annotation": annotation, "file_name": file_names} | /retinaface_pytorch-0.0.8.tar.gz/retinaface_pytorch-0.0.8/retinaface/dataset.py | 0.877293 | 0.473292 | dataset.py | pypi |
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import nn
from retinaface.box_utils import log_sum_exp, match
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function.
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter.
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
    def __init__(
        self,
        num_classes: int,
        overlap_thresh: float,
        prior_for_matching: bool,
        bkg_label: int,
        neg_mining: bool,
        neg_pos: int,
        neg_overlap: float,
        encode_target: bool,
        priors: torch.Tensor,
    ) -> None:
        """Store matching/mining hyper-parameters and the prior boxes.

        Args:
            num_classes: number of classification classes.
            overlap_thresh: jaccard threshold used when matching priors to gt.
            prior_for_matching: whether priors are used for matching.
            bkg_label: label index treated as background.
            neg_mining: whether hard negative mining is enabled.
            neg_pos: negative:positive ratio for hard negative mining.
            neg_overlap: overlap threshold for negatives.
            encode_target: whether targets are already encoded.
            priors: prior (anchor) boxes, shape ``(num_priors, 4)``.
        """
        super().__init__()
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos
        self.neg_overlap = neg_overlap
        # variances used when encoding box offsets (center, size)
        self.variance = [0.1, 0.2]
        self.priors = priors
def forward(
self, predictions: Tuple[torch.Tensor, torch.Tensor, torch.Tensor], targets: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Multibox Loss.
Args:
predictions: A tuple containing locations predictions, confidence predictions,
and prior boxes from SSD net.
conf shape: torch.size(batch_size, num_priors, num_classes)
loc shape: torch.size(batch_size, num_priors, 4)
priors shape: torch.size(num_priors, 4)
targets: Ground truth boxes and labels_gt for a batch,
shape: [batch_size, num_objs, 5] (last box_index is the label).
"""
locations_data, confidence_data, landmark_data = predictions
priors = self.priors.to(targets[0].device)
defaults = priors.data
num_predicted_boxes = locations_data.size(0)
num_priors = priors.size(0)
# match priors (default boxes) and ground truth boxes
boxes_t = torch.zeros(num_predicted_boxes, num_priors, 4).to(targets[0].device)
landmarks_t = torch.zeros(num_predicted_boxes, num_priors, 10).to(targets[0].device)
conf_t = torch.zeros(num_predicted_boxes, num_priors).to(targets[0].device).long()
for box_index in range(num_predicted_boxes):
box_gt = targets[box_index][:, :4].data
landmarks_gt = targets[box_index][:, 4:14].data
labels_gt = targets[box_index][:, 14].data
match(
self.threshold,
box_gt,
defaults,
self.variance,
labels_gt,
landmarks_gt,
boxes_t,
conf_t,
landmarks_t,
box_index,
)
# landmark Loss (Smooth L1) Shape: [batch, num_priors, 10]
positive_1 = conf_t > torch.zeros_like(conf_t)
num_positive_landmarks = positive_1.long().sum(1, keepdim=True)
n1 = max(num_positive_landmarks.data.sum().float(), 1) # type: ignore
pos_idx1 = positive_1.unsqueeze(positive_1.dim()).expand_as(landmark_data)
landmarks_p = landmark_data[pos_idx1].view(-1, 10)
landmarks_t = landmarks_t[pos_idx1].view(-1, 10)
loss_landm = F.smooth_l1_loss(landmarks_p, landmarks_t, reduction="sum")
positive = conf_t != torch.zeros_like(conf_t)
conf_t[positive] = 1
# Localization Loss (Smooth L1) Shape: [batch, num_priors, 4]
pos_idx = positive.unsqueeze(positive.dim()).expand_as(locations_data)
loc_p = locations_data[pos_idx].view(-1, 4)
boxes_t = boxes_t[pos_idx].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, boxes_t, reduction="sum")
# Compute max conf across batch for hard negative mining
batch_conf = confidence_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
loss_c[positive.view(-1, 1)] = 0 # filter out positive boxes for now
loss_c = loss_c.view(num_predicted_boxes, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = positive.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=positive.size(1) - 1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = positive.unsqueeze(2).expand_as(confidence_data)
neg_idx = neg.unsqueeze(2).expand_as(confidence_data)
conf_p = confidence_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(positive + neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction="sum")
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
n = max(num_pos.data.sum().float(), 1) # type: ignore
return loss_l / n, loss_c / n, loss_landm / n1 | /retinaface_pytorch-0.0.8.tar.gz/retinaface_pytorch-0.0.8/retinaface/multibox_loss.py | 0.961171 | 0.74297 | multibox_loss.py | pypi |
from collections import OrderedDict
from typing import Dict, List, Union
import albumentations as A
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.ops import nms
from retinaface.box_utils import decode, decode_landm
from retinaface.network import RetinaFace
from retinaface.prior_box import priorbox
from retinaface.utils import tensor_from_rgb_image
# Number of decimal places kept in the returned boxes / scores / landmarks.
ROUNDING_DIGITS = 2


class Model:
    """Inference wrapper around the RetinaFace (ResNet-50) network.

    Resizes the input so its longest side is ``max_size`` and returns
    per-face annotations mapped back to original image coordinates.
    """

    def __init__(self, max_size: int = 960, device: str = "cpu") -> None:
        self.model = RetinaFace(
            name="Resnet50",
            pretrained=False,
            return_layers={"layer2": 1, "layer3": 2, "layer4": 3},
            in_channels=256,
            out_channels=256,
        ).to(device)
        self.device = device
        # Aspect-ratio-preserving resize + ImageNet normalization.
        self.transform = A.Compose([A.LongestMaxSize(max_size=max_size, p=1), A.Normalize(p=1)])
        self.max_size = max_size
        # Box/landmark encoding variances — must match the values used in training.
        self.variance = [0.1, 0.2]

    def load_state_dict(self, state_dict: OrderedDict) -> None:
        """Load trained weights into the underlying network."""
        self.model.load_state_dict(state_dict)

    def eval(self) -> None:  # noqa: A003
        """Switch the underlying network to evaluation mode."""
        self.model.eval()

    def predict_jsons(
        self, image: np.ndarray, confidence_threshold: float = 0.7, nms_threshold: float = 0.4
    ) -> List[Dict[str, Union[List, float]]]:
        """Detect faces in an RGB image.

        Args:
            image: RGB image as a numpy array of shape (H, W, 3).
            confidence_threshold: minimum face score to keep a detection.
            nms_threshold: IoU threshold for non-maximum suppression.

        Returns:
            A list of dicts with keys "bbox" ([x_min, y_min, x_max, y_max]),
            "score" and "landmarks" (five (x, y) points). When nothing is
            found, a single placeholder entry with empty bbox and score -1.
        """
        with torch.no_grad():
            original_height, original_width = image.shape[:2]
            transformed_image = self.transform(image=image)["image"]
            transformed_height, transformed_width = transformed_image.shape[:2]
            transformed_size = (transformed_width, transformed_height)
            # Scale factors that map normalized predictions to pixel coordinates.
            scale_landmarks = torch.from_numpy(np.tile(transformed_size, 5)).to(self.device)
            scale_bboxes = torch.from_numpy(np.tile(transformed_size, 2)).to(self.device)
            prior_box = priorbox(
                min_sizes=[[16, 32], [64, 128], [256, 512]],
                steps=[8, 16, 32],
                clip=False,
                image_size=transformed_image.shape[:2],
            ).to(self.device)
            torched_image = tensor_from_rgb_image(transformed_image).to(self.device)
            loc, conf, land = self.model(torched_image.unsqueeze(0))  # pylint: disable=E1102
            conf = F.softmax(conf, dim=-1)
            annotations: List[Dict[str, Union[List, float]]] = []
            boxes = decode(loc.data[0], prior_box, self.variance)
            boxes *= scale_bboxes
            scores = conf[0][:, 1]  # probability of the "face" class
            landmarks = decode_landm(land.data[0], prior_box, self.variance)
            landmarks *= scale_landmarks
            # ignore low scores
            valid_index = torch.where(scores > confidence_threshold)[0]
            boxes = boxes[valid_index]
            landmarks = landmarks[valid_index]
            scores = scores[valid_index]
            # do NMS
            keep = nms(boxes, scores, nms_threshold)
            boxes = boxes[keep, :]
            if boxes.shape[0] == 0:
                # Placeholder so callers can detect "no faces found".
                return [{"bbox": [], "score": -1, "landmarks": []}]
            landmarks = landmarks[keep]
            scores = scores[keep].cpu().numpy().astype(float)
            boxes_np = boxes.cpu().numpy()
            landmarks_np = landmarks.cpu().numpy()
            # LongestMaxSize preserves the aspect ratio, so the height ratio
            # also rescales the x axis correctly.
            resize_coeff = original_height / transformed_height
            boxes_np *= resize_coeff
            landmarks_np = landmarks_np.reshape(-1, 10) * resize_coeff
            for box_id, bbox in enumerate(boxes_np):
                x_min, y_min, x_max, y_max = bbox
                # Drop degenerate boxes after clipping to the image bounds.
                # NOTE(review): the clipped values are only used for validity
                # checks — the reported bbox itself is left unclipped.
                x_min = np.clip(x_min, 0, original_width - 1)
                x_max = np.clip(x_max, x_min + 1, original_width - 1)
                if x_min >= x_max:
                    continue
                y_min = np.clip(y_min, 0, original_height - 1)
                y_max = np.clip(y_max, y_min + 1, original_height - 1)
                if y_min >= y_max:
                    continue
                annotations += [
                    {
                        "bbox": np.round(bbox.astype(float), ROUNDING_DIGITS).tolist(),
                        "score": np.round(scores, ROUNDING_DIGITS)[box_id],
                        "landmarks": np.round(landmarks_np[box_id].astype(float), ROUNDING_DIGITS)
                        .reshape(-1, 2)
                        .tolist(),
                    }
                ]
            return annotations
from typing import Dict, Tuple
import torch
from torch import nn
from torchvision import models
from torchvision.models import _utils
from retinaface.net import FPN, SSH
class ClassHead(nn.Module):
    """Per-level classification head: a 1x1 conv producing a 2-way
    (background / face) score for every anchor at every spatial location."""

    def __init__(self, in_channels: int = 512, num_anchors: int = 3) -> None:
        super().__init__()
        self.conv1x1 = nn.Conv2d(in_channels, num_anchors * 2, kernel_size=(1, 1), stride=(1, 1), padding=0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (N, A*2, H, W) -> channels-last -> one row of 2 scores per anchor.
        scores = self.conv1x1(x).permute(0, 2, 3, 1).contiguous()
        return scores.view(scores.shape[0], -1, 2)
class BboxHead(nn.Module):
    """Per-level box-regression head: a 1x1 conv predicting 4 box offsets
    for every anchor at every spatial location."""

    def __init__(self, in_channels: int = 512, num_anchors: int = 3):
        super().__init__()
        self.conv1x1 = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=(1, 1), stride=(1, 1), padding=0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (N, A*4, H, W) -> channels-last -> one row of 4 offsets per anchor.
        offsets = self.conv1x1(x).permute(0, 2, 3, 1).contiguous()
        return offsets.view(offsets.shape[0], -1, 4)
class LandmarkHead(nn.Module):
    """Per-level landmark head: a 1x1 conv predicting five (x, y) facial
    landmark offsets (10 values) for every anchor at every location."""

    def __init__(self, in_channels: int = 512, num_anchors: int = 3):
        super().__init__()
        self.conv1x1 = nn.Conv2d(in_channels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (N, A*10, H, W) -> channels-last -> one row of 10 values per anchor.
        points = self.conv1x1(x).permute(0, 2, 3, 1).contiguous()
        return points.view(points.shape[0], -1, 10)
class RetinaFace(nn.Module):
    """RetinaFace detector: ResNet-50 backbone + FPN + SSH context modules
    feeding three per-level heads (classification, boxes, landmarks)."""

    def __init__(
        self, name: str, pretrained: bool, in_channels: int, return_layers: Dict[str, int], out_channels: int
    ) -> None:
        super().__init__()
        if name != "Resnet50":
            raise NotImplementedError(f"Only Resnet50 backbone is supported but got {name}")
        backbone = models.resnet50(pretrained=pretrained)
        # Expose the intermediate backbone feature maps named in return_layers.
        self.body = _utils.IntermediateLayerGetter(backbone, return_layers)
        # Channel counts of the three tapped ResNet stages.
        in_channels_list = [in_channels * 2, in_channels * 4, in_channels * 8]
        self.fpn = FPN(in_channels_list, out_channels)
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)
        self.ClassHead = self._make_class_head(fpn_num=3, in_channels=out_channels)
        self.BboxHead = self._make_bbox_head(fpn_num=3, in_channels=out_channels)
        self.LandmarkHead = self._make_landmark_head(fpn_num=3, in_channels=out_channels)

    @staticmethod
    def _make_class_head(fpn_num: int = 3, in_channels: int = 64, anchor_num: int = 2) -> nn.ModuleList:
        # One classification head per pyramid level.
        return nn.ModuleList(ClassHead(in_channels, anchor_num) for _ in range(fpn_num))

    @staticmethod
    def _make_bbox_head(fpn_num: int = 3, in_channels: int = 64, anchor_num: int = 2) -> nn.ModuleList:
        # One box-regression head per pyramid level.
        return nn.ModuleList(BboxHead(in_channels, anchor_num) for _ in range(fpn_num))

    @staticmethod
    def _make_landmark_head(fpn_num: int = 3, in_channels: int = 64, anchor_num: int = 2) -> nn.ModuleList:
        # One landmark head per pyramid level.
        return nn.ModuleList(LandmarkHead(in_channels, anchor_num) for _ in range(fpn_num))

    def forward(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (bbox regressions, classifications, landmark regressions),
        each concatenated over the three pyramid levels along dim 1."""
        backbone_features = self.body(inputs)
        pyramid = self.fpn(backbone_features)
        # Apply one SSH context module per pyramid level.
        ssh_modules = (self.ssh1, self.ssh2, self.ssh3)
        features = [ssh(level) for ssh, level in zip(ssh_modules, pyramid)]
        bbox_regressions = torch.cat([head(feat) for head, feat in zip(self.BboxHead, features)], dim=1)
        classifications = torch.cat([head(feat) for head, feat in zip(self.ClassHead, features)], dim=1)
        ldm_regressions = torch.cat([head(feat) for head, feat in zip(self.LandmarkHead, features)], dim=1)
        return bbox_regressions, classifications, ldm_regressions
import argparse
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import albumentations as albu
import cv2
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import yaml
from albumentations.core.serialization import from_dict
from iglovikov_helper_functions.config_parsing.utils import object_from_dict
from iglovikov_helper_functions.dl.pytorch.utils import state_dict_from_disk
from iglovikov_helper_functions.utils.image_utils import pad_to_size, unpad_from_size
from PIL import Image
from torch import nn
from torch.nn import functional as F
from torch.utils.data import Dataset
from torch.utils.data.distributed import DistributedSampler
from torchvision.ops import nms
from tqdm import tqdm
from retinaface.box_utils import decode, decode_landm
from retinaface.utils import tensor_from_rgb_image, vis_annotations
def get_args() -> Any:
    """Parse command-line arguments for distributed RetinaFace inference.

    Returns:
        The parsed ``argparse.Namespace``. ``input_path``, ``config_path``,
        ``output_path`` and ``weight_path`` are required.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg("-i", "--input_path", type=Path, help="Path with images.", required=True)
    arg("-c", "--config_path", type=Path, help="Path to config.", required=True)
    arg("-o", "--output_path", type=Path, help="Path to save jsons.", required=True)
    arg("-v", "--visualize", action="store_true", help="Visualize predictions")
    arg("-m", "--max_size", type=int, help="Resize the largest side to this number", default=960)
    arg("-b", "--batch_size", type=int, help="batch_size", default=1)
    arg("-j", "--num_workers", type=int, help="num_workers", default=12)
    arg("--confidence_threshold", default=0.7, type=float, help="confidence_threshold")
    arg("--nms_threshold", default=0.4, type=float, help="nms_threshold")
    arg("-w", "--weight_path", type=str, help="Path to weights.", required=True)
    arg("--keep_top_k", default=750, type=int, help="keep_top_k")
    arg("--world_size", default=-1, type=int, help="number of nodes for distributed training")
    arg("--local_rank", default=-1, type=int, help="node rank for distributed training")
    # Fixed help-text typo: this flag enables fp16 (half precision), not "fp6".
    arg("--fp16", action="store_true", help="Use fp16")
    arg("--folder_in_name", action="store_true", help="Add folder to the saved labels.")
    return parser.parse_args()
class InferenceDataset(Dataset):
    """Dataset that loads images, resizes the longest side to ``max_size``,
    pads them to a square ``(max_size, max_size)`` and applies ``transform``."""

    def __init__(
        self, file_paths: List[Path], max_size: int, transform: albu.Compose
    ) -> None:  # pylint: disable=W0231
        self.file_paths = file_paths
        self.transform = transform
        self.max_size = max_size
        self.resize = albu.LongestMaxSize(max_size=max_size, p=1)

    def __len__(self) -> int:
        return len(self.file_paths)

    def __getitem__(self, idx: int) -> Optional[Dict[str, Any]]:
        path = self.file_paths[idx]
        image = np.array(Image.open(path))
        height, width = image.shape[:2]
        resized = self.resize(image=image)["image"]
        # Pad the bottom/right so every sample has the same square shape.
        padding_result = pad_to_size(target_size=(self.max_size, self.max_size), image=resized)
        normalized = self.transform(image=padding_result["image"])["image"]
        return {
            "torched_image": tensor_from_rgb_image(normalized),
            "image_path": str(path),
            "pads": np.array(padding_result["pads"]),
            "image_height": height,
            "image_width": width,
        }
def unnormalize(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
    """Invert ImageNet normalization in place and scale back to [0, 255].

    Works channel-by-channel on a channels-last image; the input is modified
    in place and also returned for convenience.
    """
    channel_means = [0.485, 0.456, 0.406]
    channel_stds = [0.229, 0.224, 0.225]
    for channel in range(image.shape[-1]):
        denormalized = image[:, :, channel] * channel_stds[channel] + channel_means[channel]  # type: ignore
        image[:, :, channel] = denormalized * 255  # type: ignore
    return image
def process_predictions(
    prediction: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    original_shapes: List[Tuple[int, int]],
    input_shape: Tuple[int, int, int, int],
    pads: Tuple[int, int, int, int],
    confidence_threshold: float,
    nms_threshold: float,
    prior_box: torch.Tensor,
    variance: Tuple[float, float],
    keep_top_k: int,
) -> List[List[Dict[str, Union[float, List[float]]]]]:
    """Convert raw network outputs for a batch into per-image face annotations.

    Args:
        prediction: (loc, conf, land) tensors from the RetinaFace network.
        original_shapes: (height, width) of each source image before resizing.
        input_shape: shape of the network input batch (N, C, H, W).
        pads: per-image padding values added during preprocessing (undone here).
        confidence_threshold: minimum face score to keep a detection.
        nms_threshold: IoU threshold for non-maximum suppression.
        prior_box: prior (anchor) boxes for the network input size.
        variance: box/landmark encoding variances (must match training).
        keep_top_k: maximum number of detections kept per image after NMS.

    Returns:
        One list of annotation dicts ("bbox", "score", "landmarks") per image;
        an image with no detections gets a single placeholder with score -1.
    """
    loc, conf, land = prediction
    conf = F.softmax(conf, dim=-1)
    result: List[List[Dict[str, Union[List[float], float]]]] = []
    batch_size, _, image_height, image_width = input_shape
    # Scale factors mapping normalized coordinates to input-pixel coordinates.
    scale1 = torch.from_numpy(np.tile([image_width, image_height], 5)).to(loc.device)
    scale = torch.from_numpy(np.tile([image_width, image_height], 2)).to(loc.device)
    for batch_id in range(batch_size):
        annotations: List[Dict[str, Union[List, float]]] = []
        boxes = decode(loc.data[batch_id], prior_box.to(loc.device), variance)
        boxes *= scale
        scores = conf[batch_id][:, 1]  # probability of the "face" class
        landmarks = decode_landm(land.data[batch_id], prior_box.to(land.device), variance)
        landmarks *= scale1
        # ignore low scores
        valid_index = torch.where(scores > confidence_threshold)[0]
        boxes = boxes[valid_index]
        landmarks = landmarks[valid_index]
        scores = scores[valid_index]
        # Sort by confidence so the [:keep_top_k] slices below keep the best ones.
        order = scores.argsort(descending=True)
        boxes = boxes[order]
        landmarks = landmarks[order]
        scores = scores[order]
        # do NMS
        keep = nms(boxes, scores, nms_threshold)
        boxes = boxes[keep, :].int()
        if boxes.shape[0] == 0:
            # Placeholder entry so downstream code can detect "no faces".
            result += [[{"bbox": [], "score": -1, "landmarks": []}]]
            continue
        landmarks = landmarks[keep]
        scores = scores[keep].cpu().numpy().astype(np.float64)[:keep_top_k]
        boxes = boxes.cpu().numpy()[:keep_top_k, :]
        landmarks = landmarks.cpu().numpy()[:keep_top_k, :]
        landmarks = landmarks.reshape([-1, 2])
        if pads is None:
            pads_numpy = np.array([0, 0, 0, 0])
        else:
            pads_numpy = pads[batch_id]
        # Undo the square padding, then rescale back to the original image size.
        unpadded = unpad_from_size(pads_numpy, bboxes=boxes, keypoints=landmarks)
        resize_coeff = max(original_shapes[batch_id]) / max(image_height, image_width)
        boxes = (unpadded["bboxes"] * resize_coeff).astype(int)
        landmarks = (unpadded["keypoints"].reshape(-1, 10) * resize_coeff).astype(int)
        for crop_id, bbox in enumerate(boxes):
            annotations += [
                {
                    "bbox": bbox.tolist(),
                    "score": float(scores[crop_id]),
                    "landmarks": landmarks[crop_id].reshape(-1, 2).tolist(),
                }
            ]
        result += [annotations]
    return result
def main() -> None:
    """Entry point: set up distributed inference over a folder of JPEG images.

    Expects to be launched via torch.distributed so that every process owns
    one GPU identified by --local_rank.
    """
    args = get_args()
    torch.distributed.init_process_group(backend="nccl")
    with args.config_path.open() as f:
        hparams = yaml.load(f, Loader=yaml.SafeLoader)
    # Merge CLI options (and derived objects such as the prior boxes) into the
    # hparams dict that is passed around downstream.
    hparams.update(
        {
            "json_path": args.output_path,
            "visualize": args.visualize,
            "confidence_threshold": args.confidence_threshold,
            "nms_threshold": args.nms_threshold,
            "keep_top_k": args.keep_top_k,
            "local_rank": args.local_rank,
            "prior_box": object_from_dict(hparams["prior_box"], image_size=[args.max_size, args.max_size]),
            "fp16": args.fp16,
            "folder_in_name": args.folder_in_name,
        }
    )
    if args.visualize:
        output_vis_path = args.output_path / "viz"
        output_vis_path.mkdir(parents=True, exist_ok=True)
        hparams["output_vis_path"] = output_vis_path
    output_label_path = args.output_path / "labels"
    output_label_path.mkdir(parents=True, exist_ok=True)
    hparams["output_label_path"] = output_label_path
    device = torch.device("cuda", args.local_rank)
    model = object_from_dict(hparams["model"])
    model = model.to(device)
    if args.fp16:
        model = model.half()
    # Strip the "model." prefix from checkpoint keys so they match the bare network.
    corrections: Dict[str, str] = {"model.": ""}
    state_dict = state_dict_from_disk(file_path=args.weight_path, rename_in_layers=corrections)
    model.load_state_dict(state_dict)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank
    )
    file_paths = list(args.input_path.rglob("*.jpg"))
    dataset = InferenceDataset(file_paths, max_size=args.max_size, transform=from_dict(hparams["test_aug"]))
    # DistributedSampler shards the files across processes; shuffle=False keeps
    # the assignment deterministic.
    sampler: DistributedSampler = DistributedSampler(dataset, shuffle=False)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
        sampler=sampler,
    )
    predict(dataloader, model, hparams, device)
def predict(dataloader: torch.utils.data.DataLoader, model: nn.Module, hparams: dict, device: torch.device) -> None:
    """Run inference over the dataloader and save one JSON per image.

    Writes detections to hparams["output_label_path"]/<folder>/<id>.json and,
    when hparams["visualize"] is set, annotated images to
    hparams["output_vis_path"]/<folder>/<id>.jpg. Images without detections
    are skipped entirely.
    """
    model.eval()
    # Only rank 0 shows a progress bar to avoid duplicated output.
    if hparams["local_rank"] == 0:
        loader = tqdm(dataloader)
    else:
        loader = dataloader
    with torch.no_grad():
        for batch in loader:
            torched_images = batch["torched_image"]  # images that are rescaled and padded
            if hparams["fp16"]:
                torched_images = torched_images.half()
            pads = batch["pads"]
            image_paths = batch["image_path"]
            image_heights = batch["image_height"]
            image_widths = batch["image_width"]
            batch_size = torched_images.shape[0]
            image_heights = image_heights.cpu().numpy()
            image_widths = image_widths.cpu().numpy()
            original_shapes = list(zip(image_heights, image_widths))
            prediction = model(torched_images.to(device))
            output_annotations = process_predictions(
                prediction=prediction,
                original_shapes=original_shapes,
                input_shape=torched_images.shape,
                pads=pads.cpu().numpy(),
                confidence_threshold=hparams["confidence_threshold"],
                nms_threshold=hparams["nms_threshold"],
                prior_box=hparams["prior_box"],
                variance=hparams["test_parameters"]["variance"],
                keep_top_k=hparams["keep_top_k"],
            )
            for batch_id in range(batch_size):
                annotations = output_annotations[batch_id]
                # An empty bbox in the first entry is the "no faces" placeholder.
                if not annotations[0]["bbox"]:
                    continue
                folder_name = Path(image_paths[batch_id]).parent.name
                file_name = Path(image_paths[batch_id]).name
                file_id = Path(image_paths[batch_id]).stem
                predictions = {
                    "file_name": file_name,
                    "annotations": annotations,
                    "file_path": str(Path(folder_name) / file_name),
                }
                (hparams["output_label_path"] / folder_name).mkdir(exist_ok=True, parents=True)
                result_path = hparams["output_label_path"] / folder_name / f"{file_id}.json"
                with result_path.open("w") as f:
                    json.dump(predictions, f, indent=2)
                if hparams["visualize"]:
                    # Undo normalization and padding to reconstruct a viewable image.
                    normalized_image = np.transpose(torched_images[batch_id].cpu().numpy(), (1, 2, 0))
                    image = unnormalize(normalized_image)
                    unpadded = unpad_from_size(pads[batch_id].cpu().numpy(), image)
                    original_image_height = image_heights[batch_id].item()
                    original_image_width = image_widths[batch_id].item()
                    image = cv2.resize(
                        unpadded["image"].astype(np.uint8), (original_image_width, original_image_height)
                    )
                    image = vis_annotations(image, annotations=annotations)  # type: ignore
                    (hparams["output_vis_path"] / folder_name).mkdir(exist_ok=True, parents=True)
                    result_path = hparams["output_vis_path"] / folder_name / f"{file_id}.jpg"
                    cv2.imwrite(str(result_path), cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Script entry point (run one process per GPU via torch.distributed).
if __name__ == "__main__":
    main()
import argparse
from typing import Dict, List, Tuple, Union
import albumentations as albu
import cv2
import numpy as np
import onnx
import onnxruntime as ort
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
from torchvision.ops import nms
from retinaface.box_utils import decode, decode_landm
from retinaface.network import RetinaFace
from retinaface.prior_box import priorbox
from retinaface.utils import tensor_from_rgb_image, vis_annotations
# Pretrained RetinaFace (ResNet-50) weights, downloaded into the torch
# model-zoo cache at import time and loaded on CPU.
state_dict = model_zoo.load_url(
    "https://github.com/ternaus/retinaface/releases/download/0.01/retinaface_resnet50_2020-07-20-f168fae3c.zip",
    progress=True,
    map_location="cpu",
)
class M(nn.Module):
    """ONNX-exportable RetinaFace: wraps the network together with decoding,
    score filtering and NMS for a fixed square input of (max_size, max_size)."""

    def __init__(self, max_size: int = 1280):
        super().__init__()
        self.model = RetinaFace(
            name="Resnet50",
            pretrained=False,
            return_layers={"layer2": 1, "layer3": 2, "layer4": 3},
            in_channels=256,
            out_channels=256,
        )
        self.model.load_state_dict(state_dict)
        self.max_size = max_size
        # Constant scale factors and prior boxes baked in for the fixed input size.
        self.scale_landmarks = torch.from_numpy(np.tile([self.max_size, self.max_size], 5))
        self.scale_bboxes = torch.from_numpy(np.tile([self.max_size, self.max_size], 2))
        self.prior_box = priorbox(
            min_sizes=[[16, 32], [64, 128], [256, 512]],
            steps=[8, 16, 32],
            clip=False,
            image_size=(self.max_size, self.max_size),
        )
        self.nms_threshold: float = 0.4
        self.variance = [0.1, 0.2]  # box/landmark encoding variances
        self.confidence_threshold: float = 0.7

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (boxes, scores, landmarks) for detections above the
        confidence threshold, after NMS, in input-pixel coordinates."""
        loc, conf, land = self.model(x)
        conf = F.softmax(conf, dim=-1)
        boxes = decode(loc.data[0], self.prior_box, self.variance)
        boxes *= self.scale_bboxes
        scores = conf[0][:, 1]  # probability of the "face" class
        landmarks = decode_landm(land.data[0], self.prior_box, self.variance)
        landmarks *= self.scale_landmarks
        # ignore low scores
        valid_index = torch.where(scores > self.confidence_threshold)[0]
        boxes = boxes[valid_index]
        landmarks = landmarks[valid_index]
        scores = scores[valid_index]
        # do NMS
        keep = nms(boxes, scores, self.nms_threshold)
        boxes = boxes[keep, :]
        landmarks = landmarks[keep]
        scores = scores[keep]
        return boxes, scores, landmarks
def prepare_image(image: np.ndarray, max_size: int = 1280) -> np.ndarray:
    """Resize the longest side to ``max_size``, apply ImageNet normalization,
    and zero-pad the bottom/right edges to a square ``(max_size, max_size)``."""
    preprocess = albu.Compose([albu.LongestMaxSize(max_size=max_size), albu.Normalize(p=1)])
    image = preprocess(image=image)["image"]
    height, width = image.shape[:2]
    return cv2.copyMakeBorder(image, 0, max_size - height, 0, max_size - width, borderType=cv2.BORDER_CONSTANT)
def main() -> None:
    """Export the wrapped model to ONNX and verify it against PyTorch.

    Runs both models on a sample image, checks the outputs match, and writes
    a visualization of the ONNX predictions to example.jpg.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg(
        "-m",
        "--max_size",
        type=int,
        help="Size of the input image. The onnx model will predict on (max_size, max_size)",
        required=True,
    )
    arg("-o", "--output_file", type=str, help="Path to save onnx model.", required=True)
    args = parser.parse_args()
    raw_image = cv2.imread("tests/data/13.jpg")
    image = prepare_image(raw_image, args.max_size)
    x = tensor_from_rgb_image(image).unsqueeze(0).float()
    model = M(max_size=args.max_size)
    model.eval()
    # Reference outputs from the PyTorch model for later comparison.
    with torch.no_grad():
        out_torch = model(x)
    torch.onnx.export(
        model,
        x,
        args.output_file,
        verbose=True,
        opset_version=12,
        input_names=["input"],
        export_params=True,
        do_constant_folding=True,
    )
    # Structural check of the exported graph.
    onnx_model = onnx.load(args.output_file)
    onnx.checker.check_model(onnx_model)
    # Numerical check: ONNX Runtime must reproduce the PyTorch outputs.
    ort_session = ort.InferenceSession(args.output_file)
    outputs = ort_session.run(None, {"input": np.expand_dims(np.transpose(image, (2, 0, 1)), 0)})
    for i in range(3):
        if not np.allclose(out_torch[i].numpy(), outputs[i]):
            raise ValueError("torch and onnx models do not match!")
    # outputs = [boxes, scores, landmarks]; build annotation dicts for drawing.
    annotations: List[Dict[str, List[Union[float, List[float]]]]] = []
    for box_id, box in enumerate(outputs[0]):
        annotations += [
            {
                "bbox": box.tolist(),
                "score": outputs[1][box_id],
                "landmarks": outputs[2][box_id].reshape(-1, 2).tolist(),
            }
        ]
    im = albu.Compose([albu.LongestMaxSize(max_size=1280)])(image=raw_image)["image"]
    cv2.imwrite("example.jpg", vis_annotations(im, annotations))
# Script entry point.
if __name__ == "__main__":
    main()
import tensorflow as tf
import numpy as np
from utilpack.util import *
import os
class RetinaFace(object):
    """TensorFlow RetinaFace detector backed by a frozen inference graph."""

    def __init__(self, quality='normal'):
        """
        :param quality: one of [ 'high','normal','speed' ]
        """
        # 'normal' and 'speed' shrink the longest side before inference to
        # trade accuracy for latency; any other value keeps the original size.
        if quality == 'normal':
            self._resizeFunc = lambda v: PyImageUtil.resize_image(v[0], **{v[1]: 800})
        elif quality == 'speed':
            self._resizeFunc = lambda v: PyImageUtil.resize_image(v[0], **{v[1]: 320})
        else:
            self._resizeFunc = lambda v: v[0]

        print("model[{} quality] init ..".format(quality))
        current_dir = os.path.dirname(os.path.abspath(__file__))
        with tf.io.gfile.GFile(current_dir + '/frozen_graph.pb', "rb") as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())

        def _imports_graph_def():
            tf.compat.v1.import_graph_def(graph_def, name="")

        # Wrap the frozen TF1 graph into a TF2 callable with input tensor
        # 'x:0' and output tensor 'Identity:0'.
        wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
        import_graph = wrapped_import.graph
        self._model = wrapped_import.prune(
            tf.nest.map_structure(import_graph.as_graph_element, ['x:0']),
            tf.nest.map_structure(import_graph.as_graph_element, ['Identity:0'])
        )
        # Warm-up call so the first real prediction is not slowed by tracing.
        self.predict(np.zeros((320, 320, 3), dtype=np.float32))
        print("model success !")

    def read(self, image_path):
        """
        read image from path
        :param image_path: path to an image file readable by OpenCV
        :return: rgb image, float32
        """
        img_cv = PyImageUtil.cv2.imread(image_path)
        rgb_image = PyImageUtil.cv2.cvtColor(img_cv, PyImageUtil.cv2.COLOR_BGR2RGB).astype(np.float32)
        return rgb_image

    def _predict(self, rgb_image, threshold=0.95):
        """
        detect face in rgb image (no resizing or padding is applied here)
        :param rgb_image: rgb image; ! width and height have to be multiples of 32 !, float32
        :param threshold: threshold of confidence
        :return: faces(list), each face(dict) has keys [ x1, y1, x2, y2, left_eye, right_eye, nose, left_lip, right_lip ]
        """
        img_h, img_w = rgb_image.shape[:2]
        x = tf.cast(rgb_image, dtype=tf.float32)
        # prediction: one row per candidate, [14 ratio coordinates, 2 extra values]
        outputs = tf.squeeze(self._model(x[tf.newaxis, ...]), axis=0)
        # postprocessing (ratio to pixel, confidence threshold)
        outputs = tf.concat([
            tf.reshape(tf.multiply(tf.reshape(tf.slice(outputs, [0, 0], [-1, 14]), [-1, 7, 2]), [img_w, img_h]), [-1, 14]),
            tf.slice(outputs, [0, 14], [-1, 2])
        ], axis=1)
        outputs = tf.gather_nd(outputs, tf.where(tf.squeeze(tf.slice(outputs, [0, 15], [-1, 1]), axis=-1) >= threshold))
        faces = []
        for bbox in outputs:
            x1, y1, x2, y2 = list(map(int, bbox[:4]))
            # np.int was removed in NumPy >= 1.24; the builtin int is equivalent here.
            left_eye, right_eye, nose, left_lip, right_lip = list(map(tuple, np.reshape(bbox, [-1, 2]).astype(int)[2:-1]))
            faces.append({
                'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                'left_eye': left_eye, 'right_eye': right_eye, 'nose': nose, 'left_lip': left_lip, 'right_lip': right_lip
            })
        return faces

    def predict(self, rgb_image, threshold=0.95):
        """
        detect face in rgb image
        :param rgb_image: rgb image, any size, float32
        :param threshold: threshold of confidence
        :return: faces(list), each face(dict) has keys [ x1, y1, x2, y2, left_eye, right_eye, nose, left_lip, right_lip ]
        """
        img_h_, img_w_ = rgb_image.shape[:2]
        # Shrink the longest side according to the chosen quality preset.
        if img_h_ > img_w_:
            rgb_image = self._resizeFunc([rgb_image, 'height'])
        else:
            rgb_image = self._resizeFunc([rgb_image, 'width'])
        img_h, img_w = rgb_image.shape[:2]
        # preprocessing: pad bottom/right so both sides are multiples of 32
        max_steps = 32
        img_h_pad = max_steps - img_h % max_steps if img_h and img_h % max_steps != 0 else 0
        img_w_pad = max_steps - img_w % max_steps if img_w and img_w % max_steps != 0 else 0
        padded_img = tf.pad(rgb_image, [[0, img_h_pad], [0, img_w_pad], [0, 0]])
        x = tf.cast(padded_img, dtype=tf.float32)
        # prediction
        outputs = tf.squeeze(self._model(x[tf.newaxis, ...]), axis=0)
        # postprocessing (remove padding, ratio to pixel, confidence threshold)
        outputs = tf.concat([
            tf.reshape(tf.multiply(tf.reshape(tf.slice(outputs, [0, 0], [-1, 14]), [-1, 7, 2]),
                                   [tf.add(img_w_pad, img_w if img_w else 0),
                                    tf.add(img_h_pad, img_h if img_h else 0)]),
                       [-1, 14]),
            tf.slice(outputs, [0, 14], [-1, 2])
        ], axis=1)
        outputs = tf.gather_nd(outputs, tf.where(tf.squeeze(tf.slice(outputs, [0, 15], [-1, 1]), axis=-1) >= threshold))
        faces = []
        for bbox in outputs:
            # Map detections from the resized frame back to the original size.
            w_ex = img_w_ / img_w
            h_ex = img_h_ / img_h
            x1, y1, x2, y2 = list(map(int, np.multiply(bbox[:4], [w_ex, h_ex, w_ex, h_ex])))
            # np.int was removed in NumPy >= 1.24; the builtin int is equivalent here.
            left_eye, right_eye, nose, left_lip, right_lip = list(map(tuple, np.multiply(np.reshape(bbox, [-1, 2]), [w_ex, h_ex]).astype(int)[2:-1]))
            faces.append({
                'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
                'left_eye': left_eye, 'right_eye': right_eye, 'nose': nose, 'left_lip': left_lip, 'right_lip': right_lip
            })
        return faces

    def draw(self, rgb_image, faces, thickness=3, **kwargs):
        """
        :param rgb_image: rgb_image, same size as predict's input
        :param faces: result of predict method
        :param thickness: thickness of lines
        :keyword colors: list of colors; elements mean [ faceRect, left_eye, right_eye, nose, left_lip, right_lip ]
        :return: result image
        """
        drawing_img = rgb_image.copy()
        if 'colors' in kwargs:
            colors = kwargs['colors']
        else:
            # Default palette: box color first, then one color per landmark.
            colors = [(255, 0, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 0, 0), (255, 0, 255)]
        for face in faces:
            PyImageUtil.cv2.rectangle(drawing_img, (face['x1'], face['y1']), (face['x2'], face['y2']), colors[0], thickness)
            PyImageUtil.cv2.circle(drawing_img, face['left_eye'], 1, colors[1], thickness)
            PyImageUtil.cv2.circle(drawing_img, face['right_eye'], 1, colors[2], thickness)
            PyImageUtil.cv2.circle(drawing_img, face['nose'], 1, colors[3], thickness)
            PyImageUtil.cv2.circle(drawing_img, face['left_lip'], 1, colors[4], thickness)
            PyImageUtil.cv2.circle(drawing_img, face['right_lip'], 1, colors[5], thickness)
        return drawing_img
if __name__ == '__main__':
    # Manual smoke test: run the raw (no-resize) detector over a local image
    # folder and time each prediction. Paths are machine-specific.
    import cv2

    detector = RetinaFace('normal')
    for path in PyDataUtil.get_pathlist('/Users/hian/Desktop/Data/image_data/snaps_image/thum'):
        rgb_image = detector.read(path)
        # 640 is a multiple of 32, as required by _predict.
        rgb_image = cv2.resize(rgb_image, (640, 640))
        PyDebugUtil.tic()
        faces = detector._predict(rgb_image)
        time = PyDebugUtil.toc()
import numpy as np
import matplotlib.pyplot as plt
import retinotopic_mapping.StimulusRoutines as stim
from retinotopic_mapping.MonitorSetup import Monitor, Indicator
from retinotopic_mapping.DisplayStimulus import DisplaySequence
"""
To get up and running quickly before performing any experiments it is
sufficient to setup two monitors -- one for display and one for your python
environment. If you don't have two monitors at the moment it is doable with
only one.
Edit the following block of code with your own monitors respective parameters.
Since this script is for general debugging and playing around with the code,
we will arbitrarily populate variables that describe the geometry of where
the mouse will be located during an experiment. All we are interested in
here is just making sure that we can display stimulus on a monitor and learning
how to work with the different stimulus routines.
"""
# ======================== monitor parameters ==================================
mon_resolution = (1200,1920) #enter your monitors resolution (height, width)
mon_width_cm = 52 #enter your monitors width in cm
mon_height_cm = 32 #enter your monitors height in cm
mon_refresh_rate = 60 #enter your monitors height in Hz
# The following variables correspond to the geometry of your setup don't worry about them for now.
mon_C2T_cm = mon_height_cm / 2. # center (projection point from mouse eye to the monitor) to monitor top edge in cm
mon_C2A_cm = mon_width_cm / 2. # center (projection point from mouse eye to the monitor) to monitor anterior edge in cm
mon_center_coordinates = (0., 60.) # the visual coordinates of center (altitude, azimuth)
mon_dis_cm = 15. # cm from mouse eye to the monitor
mon_downsample_rate = 10 # downsample rate of the displayed images relative to the monitor resolution.
# the both numbers in mon_resolution should be divisble by this number
# ======================== monitor parameters ==================================
# ======================== indicator parameters ================================
ind_width_cm = 3.
ind_height_cm = 3.
ind_position = 'northeast'
ind_is_sync = 'True'
ind_freq = 1.
# ======================== indicator parameters ================================
# ============================ generic stimulus parameters ======================
pregap_dur = 2.
postgap_dur = 3.
background = 0.
coordinate = 'degree'
# ===============================================================================
# ============================ LocallySparseNoise ===============================
lsn_subregion = None
lsn_min_distance = 40.
lsn_grid_space = (4., 4.)
lsn_probe_size = (4., 4.)
lsn_probe_orientation = 0.
lsn_probe_frame_num = 15
lsn_sign = 'ON-OFF'
lsn_iteration = 10
lsn_is_include_edge = True
# ===============================================================================
# ============================ DisplaySequence ====================================
ds_log_dir = r'C:\data'
ds_backupdir = None
ds_identifier = 'TEST'
ds_display_iter = 1
ds_mouse_id = 'MOUSE'
ds_user_id = 'USER'
ds_psychopy_mon = 'testMonitor'
ds_is_by_index = True
ds_is_interpolate = False
ds_is_triggered = False
ds_trigger_event = "negative_edge"
ds_trigger_NI_dev = 'Dev1'
ds_trigger_NI_port = 1
ds_trigger_NI_line = 0
ds_is_sync_pulse = False
ds_sync_pulse_NI_dev = 'Dev1'
ds_sync_pulse_NI_port = 1
ds_sync_pulse_NI_line = 1
ds_display_screen = 0
ds_initial_background_color = 0.
# =================================================================================
# Initialize Monitor object
mon = Monitor(resolution=mon_resolution, dis=mon_dis_cm, mon_width_cm=mon_width_cm, mon_height_cm=mon_height_cm,
C2T_cm=mon_C2T_cm, C2A_cm=mon_C2A_cm, center_coordinates=mon_center_coordinates,
downsample_rate=mon_downsample_rate)
# plot warpped monitor coordinates
mon.plot_map()
plt.show()
# initialize Indicator object
ind = Indicator(mon, width_cm=ind_width_cm, height_cm=ind_height_cm, position=ind_position, is_sync=ind_is_sync,
freq=ind_freq)
# initialize LocallySparseNoise object
lsn = stim.LocallySparseNoise(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
postgap_dur=postgap_dur, coordinate=coordinate,
background=background, subregion=lsn_subregion,
grid_space=lsn_grid_space, sign=lsn_sign,
probe_size=lsn_probe_size, probe_orientation=lsn_probe_orientation,
probe_frame_num=lsn_probe_frame_num, iteration=lsn_iteration,
is_include_edge=lsn_is_include_edge, min_distance=lsn_min_distance)
# initialize DisplaySequence object
ds = DisplaySequence(log_dir=ds_log_dir, backupdir=ds_backupdir,
identifier=ds_identifier, display_iter=ds_display_iter,
mouse_id=ds_mouse_id, user_id=ds_user_id,
psychopy_mon=ds_psychopy_mon, is_by_index=ds_is_by_index,
is_interpolate=ds_is_interpolate, is_triggered=ds_is_triggered,
trigger_event=ds_trigger_event, trigger_NI_dev=ds_trigger_NI_dev,
trigger_NI_port=ds_trigger_NI_port, trigger_NI_line=ds_trigger_NI_line,
is_sync_pulse=ds_is_sync_pulse, sync_pulse_NI_dev=ds_sync_pulse_NI_dev,
sync_pulse_NI_port=ds_sync_pulse_NI_port,
sync_pulse_NI_line=ds_sync_pulse_NI_line,
display_screen=ds_display_screen,
initial_background_color=ds_initial_background_color)
# display
# =============================== display =========================================
ds.set_stim(lsn)
ds.trigger_display()
plt.show()
# ================================================================================= | /retinotopic_mapping-2.7.0.tar.gz/retinotopic_mapping-2.7.0/retinotopic_mapping/examples/visual_stimlation/example_locally_sparse_noise.py | 0.45641 | 0.51751 | example_locally_sparse_noise.py | pypi |
from psychopy import visual, event
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import time
from tools import FileTools as ft
from tools.IO import nidaq as iodaq
def analyze_frames(ts, refresh_rate, check_point=(0.02, 0.033, 0.05, 0.1)):
    """
    Analyze frame durations of time stamp data. input is the time stamps
    of each frame and the refresh rate of the monitor
    Parameters
    ----------
    ts : ndarray
        list of time stamps of each frame measured in seconds
    refresh_rate : float
        the refresh rate of imaging monitor measured in Hz
    check_point : tuple, optional
        duration thresholds in seconds; for each one the stats report how
        many frames lasted longer than it
    Returns
    -------
    frame_duration : ndarray
        list containing the length of each time stamp
    frame_stats : str
        string containing a statistical analysis of the image frames
    """
    # NB: Python 2 module (print statement below; `/` is integer division
    # on ints in the percentage computation).
    # Successive differences give per-frame durations; a histogram figure
    # is created as a side effect but not shown/saved here.
    frame_duration = ts[1::] - ts[0:-1]
    plt.figure()
    plt.hist(frame_duration, bins=np.linspace(0.0, 0.05, num=51))
    refresh_rate = float(refresh_rate)
    num_frames = len(ts)-1
    disp_true = ts[-1]-ts[0]
    disp_expect = (len(ts)-1)/refresh_rate
    # convert seconds to milliseconds for reporting
    avg_frame_time = np.mean(frame_duration)*1000
    sdev_frame_time = np.std(frame_duration)*1000
    short_frame = min(frame_duration)*1000
    short_frame_ind = np.nonzero(frame_duration==np.min(frame_duration))[0][0]
    long_frame = max(frame_duration)*1000
    long_frame_ind = np.nonzero(frame_duration==np.max(frame_duration))[0][0]
    frame_stats = '\n'
    frame_stats += 'Total number of frames : %d. \n' % num_frames
    frame_stats += 'Total length of display : %.5f second. \n' % disp_true
    frame_stats += 'Expected length of display: %.5f second. \n' % disp_expect
    frame_stats += 'Mean of frame durations : %.2f ms. \n' % avg_frame_time
    frame_stats += 'Standard deviation of frame : %.2f ms.\n' % sdev_frame_time
    frame_stats += 'Shortest frame: %.2f ms, index: %d. \n' % (short_frame,
                                                               short_frame_ind)
    frame_stats += 'Longest frame : %.2f ms, index: %d. \n' % (long_frame,
                                                               long_frame_ind)
    for i in range(len(check_point)):
        check_number = check_point[i]
        frame_number = len(frame_duration[frame_duration>check_number])
        # NOTE(review): `frame_number*10000/(len(ts)-1)` is Python 2 integer
        # division, so the percentage is truncated before rounding -- confirm
        # this is intended.
        frame_stats += 'Number of frames longer than %d ms: %d; %.2f%% \n' \
                       % (round(check_number*1000),
                          frame_number,
                          round(frame_number*10000/(len(ts)-1))/100)
    print frame_stats
    return frame_duration, frame_stats
class DisplaySequence(object):
    """
    Display the numpy sequence from memory

    Wraps a psychopy window plus optional NI-DAQ digital lines: an input
    line can gate the start of display (trigger), an output line emits a
    sync pulse around every frame flip, and another input port supplies a
    binary-coded file number for the log file name.  NB: Python 2 module
    (print statements, `raise X, "msg"` syntax, `time.clock`).
    """
    def __init__(self,
                 log_dir,
                 backupdir=None,
                 display_iter=1,
                 display_order=1,
                 mouse_id='Test',
                 user_id='Name',
                 psychopy_mon='testMonitor',
                 is_interpolate=False,
                 is_triggered=False,
                 trigger_NI_dev='Dev1',
                 trigger_NI_port=1,
                 trigger_NI_line=0,
                 is_sync_pulse_pulse=True,
                 sync_pulse_NI_dev='Dev1',
                 sync_pulse_NI_port=1,
                 sync_pulse_NI_line=1,
                 display_trigger_event="negative_edge",
                 display_screen=0,
                 initial_background_color=0,
                 file_num_NI_dev='Dev1',
                 file_num_NI_port='0',
                 file_num_NI_lines='0:7'):
        """
        initialize `DisplaySequence` object
        Parameters
        ----------
        log_dir : str
            system directory path to where log display will be saved
        backupdir : str, optional
            copy of directory path to save backup, defaults to `None`
        display_iter : int, optional
            defaults to 1
        display_order : int, optional
            determines whether the stimulus is presented forward or backwards.
            If 1, stimulus is presented forward, whereas if -1, stimulus is
            presented backwards. Defaults to 1.
        mouse_id : str, optional
            label for mouse, defaults to 'Test'
        user_id : str, optional
            label for person performing experiment, defaults to 'Name'
        psychopy_mon : str, optional
            label for monitor used for displaying the stimulus, defaults to
            'testMonitor'
        is_interpolate : bool, optional
            defaults to False
        is_triggered : bool, optional
            if True, stimulus will not display until triggered. if False,
            stimulus will display automatically. defaults to False
        trigger_NI_dev : str, optional
            defaults to 'Dev1'
        trigger_NI_port : int, optional
            defaults to 1
        trigger_NI_line : int, optional
            defaults to 0
        is_sync_pulse_pulse : bool, optional
            defaults to True
        sync_pulse_NI_dev : str, optional
            defaults to 'Dev1'
        sync_pulse_NI_port : int, optional
            defaults to 1
        sync_pulse_NI_line : int, optional
            defaults to 1
        display_trigger_event :
            should be one of "negative_edge", "positive_edge", "high_level",
            or "low_level". defaults to "negative_edge"
        display_screen :
            determines which monitor to display stimulus on. defaults to 0
        initial_background_color :
            defaults to 0
        file_num_NI_dev :
            defaults to 'Dev1',
        file_num_NI_port :
            defaults to '0'
        file_num_NI_lines :
            defaults to '0:7'
        """
        self.sequence = None
        # NOTE(review): this initial value is overwritten by
        # `self.seq_log = None` at the end of __init__ -- confirm which
        # default is intended.
        self.seq_log = {}
        self.psychopy_mon = psychopy_mon
        self.is_interpolate = is_interpolate
        self.is_triggered = is_triggered
        self.trigger_NI_dev = trigger_NI_dev
        self.trigger_NI_port = trigger_NI_port
        self.trigger_NI_line = trigger_NI_line
        self.display_trigger_event = display_trigger_event
        # NOTE(review): "pulse" is doubled in this name throughout the class
        # (public keyword argument, so it cannot be renamed here).
        self.is_sync_pulse_pulse = is_sync_pulse_pulse
        self.sync_pulse_NI_dev = sync_pulse_NI_dev
        self.sync_pulse_NI_port = sync_pulse_NI_port
        self.sync_pulse_NI_line = sync_pulse_NI_line
        self.display_screen = display_screen
        self.initial_background_color = initial_background_color
        self.keep_display = None
        self.file_num_NI_dev = file_num_NI_dev
        self.file_num_NI_port = file_num_NI_port
        self.file_num_NI_lines = file_num_NI_lines
        # `% 1 == 0` also accepts whole-number floats such as 2.0
        if display_iter % 1 == 0:
            self.display_iter = display_iter
        else:
            raise ArithmeticError, "`display_iter` should be a whole number."
        self.display_order = display_order
        self.log_dir = log_dir
        self.backupdir = backupdir
        self.mouse_id = mouse_id
        self.user_id = user_id
        self.seq_log = None
        self.clear()
    def set_any_array(self, any_array, log_dict = None):
        """
        to display any numpy 3-d array.

        The array is min-max normalized to [0, 1] and rescaled to the
        psychopy intensity range [-1, 1] (stored as float16).
        """
        if len(any_array.shape) != 3:
            raise LookupError, "Input numpy array should have dimension of 3!"
        vmax = np.amax(any_array).astype(np.float32)
        vmin = np.amin(any_array).astype(np.float32)
        v_range = (vmax-vmin)
        any_array_nor = ((any_array-vmin)/v_range).astype(np.float16)
        self.sequence = 2*(any_array_nor-0.5)
        if log_dict != None:
            if type(log_dict) is dict:
                self.seq_log = log_dict
            else:
                raise ValueError, '`log_dict` should be a dictionary!'
        else:
            self.seq_log = {}
        self.clear()
    def set_stim(self, stim):
        """
        generate sequence of stimulus to be displayed.
        Calls the `generate_movie` method of the respective stim object and
        populates the attributes `self.sequence` and `self.seq_log`
        Parameters
        ----------
        stim : Stim object
            the type of stimulus to be presented in the experiment
        """
        self.sequence, self.seq_log = stim.generate_movie()
        self.clear()
    def trigger_display(self):
        """
        Display stimulus
        Prepares all of the necessary parameters to display stimulus and store
        the data collected.
        """
        # --------------- early preparation for display--------------------
        # test monitor resolution
        try:
            # monitor log stores (height, width); psychopy wants (width, height)
            resolution = self.seq_log['monitor']['resolution'][::-1]
        except KeyError:
            resolution = (800,600)
        # test monitor refresh rate
        try:
            refresh_rate = self.seq_log['monitor']['refresh_rate']
        except KeyError:
            print "No monitor refresh rate information, assuming 60Hz.\n"
            refresh_rate = 60.
        #prepare display frames log
        if self.sequence is None:
            raise LookupError, "Please set the sequence to be displayed!!\n"
        try:
            seq_frames = self.seq_log['stimulation']['frames']
            if self.display_order == -1:
                seq_frames = seq_frames[::-1]
            # generate display Frames
            self.display_frames=[]
            for i in range(self.display_iter):
                self.display_frames += seq_frames
        except Exception as e:
            print e
            print "No frame information in seq_log dictionary."
            print "Setting display_frames to 'None'.\n"
            self.display_frames = None
        # calculate expected display time
        display_time = (float(self.sequence.shape[0]) *
                        self.display_iter / refresh_rate)
        print '\n Expected display time: ', display_time, ' seconds\n'
        # generate file name
        self._get_file_name()
        print 'File name:', self.file_name + '\n'
        # -----------------setup psychopy window and stimulus--------------
        # start psychopy window
        window = visual.Window(size=resolution,
                               monitor=self.psychopy_mon,
                               fullscr=True,
                               screen=self.display_screen,
                               color=self.initial_background_color)
        stim = visual.ImageStim(window, size=(2,2),
                                interpolate=self.is_interpolate)
        # initialize keep_display
        self.keep_display = True
        # handle display trigger
        if self.is_triggered:
            display_wait = self._wait_for_trigger(event=self.display_trigger_event)
            if not display_wait:
                # manual abort while waiting: close window without displaying
                window.close()
                self.clear()
                return None
        else:
            time.sleep(5.) # wait remote object to start
        # display sequence
        self._display(window, stim)
        self.save_log()
        #analyze frames
        try:
            self.frame_duration, self.frame_stats = \
                analyze_frames(ts=self.time_stamp,
                               refresh_rate = self.seq_log['monitor']['refresh_rate'])
        except KeyError:
            print "No monitor refresh rate information, assuming 60Hz."
            self.frame_duration, self.frame_stats = \
                analyze_frames(ts = self.time_stamp, refresh_rate = 60.)
        #clear display data
        self.clear()
    def _wait_for_trigger(self, event):
        """
        time place holder for waiting for trigger

        Polls the configured NI digital input line until the requested
        condition occurs, checking for a keyboard abort between reads.
        The `while ... else` blocks below run only when the loop exits
        normally, i.e. when `self.keep_display` became False (manual stop).
        Parameters
        ----------
        event : str from {'low_level','high_level','negative_edge','positive_edge'}
        Returns
        -------
        Bool :
            returns True if trigger is detected and False if manual stop
            signal is detected
        """
        #check NI signal
        trigger_task = iodaq.DigitalInput(self.trigger_NI_dev,
                                          self.trigger_NI_port,
                                          self.trigger_NI_line)
        trigger_task.StartTask()
        print "Waiting for trigger: " + event + ' on ' + trigger_task.devstr
        if event == 'low_level':
            # NOTE(review): first read is the raw list (no [0]), unlike the
            # other branches; a non-empty list compares != 0 as True, so the
            # loop body still runs -- confirm this is intended.
            last_TTL = trigger_task.read()
            while last_TTL != 0 and self.keep_display:
                last_TTL = trigger_task.read()[0]
                self._update_display_status()
            else:
                if self.keep_display:
                    trigger_task.StopTask()
                    print 'Trigger detected. Start displaying...\n\n'
                    return True
                else:
                    trigger_task.StopTask()
                    print 'Manual stop signal detected. Stopping the program.'
                    return False
        elif event == 'high_level':
            last_TTL = trigger_task.read()[0]
            while last_TTL != 1 and self.keep_display:
                last_TTL = trigger_task.read()[0]
                self._update_display_status()
            else:
                if self.keep_display:
                    trigger_task.StopTask()
                    print 'Trigger detected. Start displaying...\n\n'
                    return True
                else:
                    trigger_task.StopTask()
                    print 'Manual stop signal detected. Stopping the program.'
                    return False
        elif event == 'negative_edge':
            last_TTL = trigger_task.read()[0]
            while self.keep_display:
                current_TTL = trigger_task.read()[0]
                # edge = 1 on the previous read, 0 on this one
                if (last_TTL == 1) and (current_TTL == 0):
                    break
                else:
                    last_TTL = int(current_TTL)
                    self._update_display_status()
            else:
                trigger_task.StopTask()
                print 'Manual stop signal detected. Stopping the program.'
                return False
            trigger_task.StopTask()
            print 'Trigger detected. Start displaying...\n\n'
            return True
        elif event == 'positive_edge':
            last_TTL = trigger_task.read()[0]
            while self.keep_display:
                current_TTL = trigger_task.read()[0]
                # edge = 0 on the previous read, 1 on this one
                if (last_TTL == 0) and (current_TTL == 1):
                    break
                else:
                    last_TTL = int(current_TTL)
                    self._update_display_status()
            else:
                trigger_task.StopTask();
                print 'Manual stop signal detected. Stopping the program.'
                return False
            trigger_task.StopTask()
            print 'Trigger detected. Start displaying...\n\n'
            return True
        else:
            raise NameError, "`trigger` not in " \
                             "{'negative_edge','positive_edge', 'high_level','low_level'}!"
    def _get_file_name(self):
        """
        generate the file name of log file

        Format: <timestamp>-<stim_name>-M<mouse>-<user>-<file#>-(not)Triggered,
        falling back to 'customStim' when the stimulation name is absent.
        """
        try:
            self.file_name = datetime.datetime.now().strftime('%y%m%d%H%M%S') + \
                             '-' + \
                             self.seq_log['stimulation']['stim_name'] + \
                             '-M' + \
                             self.mouse_id + \
                             '-' + \
                             self.user_id
        except KeyError:
            self.file_name = datetime.datetime.now().strftime('%y%m%d%H%M%S') + \
                             '-' + 'customStim' + '-M' + self.mouse_id + '-' + \
                             self.user_id
        file_number = self._get_file_number()
        if self.is_triggered:
            self.file_name += '-' + str(file_number)+'-Triggered'
        else:
            self.file_name += '-' + str(file_number) + '-notTriggered'
    def _get_file_number(self):
        """
        get synced file number for log file name

        Reads the digital lines once and interprets them as a binary number
        (reversed bit order). Returns None on any DAQ failure.
        """
        try:
            file_num_task = iodaq.DigitalInput(self.file_num_NI_dev,
                                               self.file_num_NI_port,
                                               self.file_num_NI_lines)
            file_num_task.StartTask()
            array = file_num_task.read()
            num_str = (''.join([str(line) for line in array]))[::-1]
            file_number = int(num_str, 2)
            # print array, file_number
        except Exception as e:
            print e
            file_number = None
        return file_number
    def _display(self, window, stim):
        # Main frame loop: draw each frame, pulse the sync line high before
        # the flip and low after it, and time-stamp every frame.
        # display frames
        time_stamp=[]
        # NOTE(review): time.clock() is wall-clock on Windows, CPU time on
        # Unix (and removed in Python 3.8) -- this module assumes Windows.
        start_time = time.clock()
        singleRunFrames = self.sequence.shape[0]
        if self.is_sync_pulse_pulse:
            syncPulseTask = iodaq.DigitalOutput(self.sync_pulse_NI_dev,
                                                self.sync_pulse_NI_port,
                                                self.sync_pulse_NI_line)
            syncPulseTask.StartTask()
            _ = syncPulseTask.write(np.array([0]).astype(np.uint8))
        i = 0
        while self.keep_display and i < (singleRunFrames * self.display_iter):
            # NOTE(review): frame_num is unbound if display_order is neither
            # 1 nor -1 -- confirm those are the only allowed values.
            if self.display_order == 1:
                frame_num = i % singleRunFrames
            if self.display_order == -1:
                frame_num = singleRunFrames - (i % singleRunFrames) -1
            # flip vertically to match psychopy's image orientation
            stim.setImage(self.sequence[frame_num][::-1,:])
            stim.draw()
            time_stamp.append(time.clock()-start_time)
            #set syncPuls signal
            if self.is_sync_pulse_pulse:
                _ = syncPulseTask.write(np.array([1]).astype(np.uint8))
            #show visual stim
            window.flip()
            #set syncPuls signal
            if self.is_sync_pulse_pulse:
                _ = syncPulseTask.write(np.array([0]).astype(np.uint8))
            self._update_display_status()
            i=i+1
        stop_time = time.clock()
        window.close()
        if self.is_sync_pulse_pulse:
            syncPulseTask.StopTask()
        self.time_stamp = np.array(time_stamp)
        self.display_length = stop_time-start_time
        if self.display_frames is not None:
            # truncate the planned frame list to what was actually shown
            self.display_frames = self.display_frames[:i]
        if self.keep_display == True:
            print '\nDisplay successfully completed.'
    def flag_to_close(self):
        # external abort hook: makes the display loop exit at the next frame
        self.keep_display = False
    def _update_display_status(self):
        # poll the keyboard for an abort request ('q' or escape)
        if self.keep_display is None:
            raise LookupError, 'self.keep_display should start as True'
        #check keyboard input 'q' or 'escape'
        keyList = event.getKeys(['q','escape'])
        if len(keyList) > 0:
            self.keep_display = False
            print "Keyboard stop signal detected. Stop displaying. \n"
    def set_display_order(self, display_order):
        # 1 = forward, -1 = backward; resets any previous display results
        self.display_order = display_order
        self.clear()
    def set_display_iteration(self, display_iter):
        # accepts whole numbers (including whole-number floats)
        if display_iter % 1 == 0:
            self.display_iter = display_iter
        else:
            raise ArithmeticError, "`display_iter` should be a whole number."
        self.clear()
    def save_log(self):
        # Persist seq_log plus this object's state (minus the raw sequence)
        # as a pickle, optionally mirrored to the backup directory.
        if self.display_length is None:
            self.clear()
            raise LookupError, "Please display sequence first!"
        if self.file_name is None:
            self._get_file_name()
        if self.keep_display == True:
            self.file_name += '-complete'
        elif self.keep_display == False:
            self.file_name += '-incomplete'
        #set up log object
        # NOTE(review): '\s' is not a recognized escape in Python 2, so this
        # is a literal backslash (Windows path separator) -- confirm the
        # module is Windows-only before touching this.
        directory = self.log_dir + '\sequence_display_log'
        if not(os.path.isdir(directory)):
            os.makedirs(directory)
        logFile = dict(self.seq_log)
        displayLog = dict(self.__dict__)
        displayLog.pop('seq_log')
        displayLog.pop('sequence')
        logFile.update({'presentation':displayLog})
        file_name = self.file_name + ".pkl"
        #generate full log dictionary
        path = os.path.join(directory, file_name)
        ft.saveFile(path,logFile)
        print ".pkl file generated successfully."
        backupFileFolder = self._get_backup_folder()
        if backupFileFolder is not None:
            if not (os.path.isdir(backupFileFolder)):
                os.makedirs(backupFileFolder)
            backupFilePath = os.path.join(backupFileFolder,file_name)
            ft.saveFile(backupFilePath,logFile)
            print ".pkl backup file generate successfully"
        else:
            print "did not find backup path, no backup has been saved."
    def _get_backup_folder(self):
        # Backup folder is <backupdir>/<date>-M<mouse>-<suffix>, where the
        # suffix is 'Retinotopy' for KSstim stimuli, else the stim name.
        if self.file_name is None:
            raise LookupError, 'self.file_name not found.'
        else:
            if self.backupdir is not None:
                curr_date = self.file_name[0:6]
                stim_name = self.seq_log['stimulation']['stim_name']
                if 'KSstim' in stim_name:
                    backupFileFolder = \
                        os.path.join(self.backupdir,
                                     curr_date+'-M'+self.mouse_id+'-Retinotopy')
                else:
                    backupFileFolder = \
                        os.path.join(self.backupdir,
                                     curr_date+'-M'+self.mouse_id+'-'+stim_name)
                return backupFileFolder
            else:
                return None
    def clear(self):
        """ clear display information. """
        self.display_length = None
        self.time_stamp = None
        self.frame_duration = None
        self.display_frames = None
        self.frame_stats = None
        self.file_name = None
        self.keep_display = None
if __name__ == "__main__":
pass | /retinotopic_maps-2.0.0.tar.gz/retinotopic_maps-2.0.0/retinotopic_mapping/DisplayStimulus.py | 0.710929 | 0.506774 | DisplayStimulus.py | pypi |
from typing import (
cast,
Literal,
Optional,
Any,
Generator,
Union,
Iterable,
TypedDict,
Protocol,
)
from datetime import datetime
from google.cloud.exceptions import Conflict, NotFound
from google.cloud.firestore_v1.base_query import BaseQuery
from google.cloud.firestore_v1.transforms import Sentinel, ArrayUnion, ArrayRemove
from google.cloud.firestore_v1.field_path import (
get_field_path,
FieldPath,
)
from google.cloud.firestore_v1 import (
Client,
DocumentReference,
DocumentSnapshot,
CollectionReference,
Transaction,
transactional,
DELETE_FIELD,
SERVER_TIMESTAMP,
)
_free_marker_literal = Literal["_FREE_MARKER_"]
_free_marker: _free_marker_literal = "_FREE_MARKER_"
_owned_marker_literal = Literal["_OWNED_MARKER_"]
_owned_marker: _owned_marker_literal = "_OWNED_MARKER_"
_retired_marker_literal = Literal["_RETIRED_MARKER_"]
_retired_marker: _retired_marker_literal = "_RETIRED_MARKER_"
_active_field_literal = Literal["_ACTIVE_"]
_active_field: _active_field_literal = "_ACTIVE_"
OwnerDataContainer = dict[str, Any]
class OwnerDataContainer(TypedDict, total=True):
    """Per-owner entry stored inside one resource document."""

    # one of the three marker values defined above
    state: Union[_free_marker_literal, _owned_marker_literal, _retired_marker_literal]
    # arbitrary owner-scoped payload, mutated via update_data()
    data: dict[str, Any]
    # SERVER_TIMESTAMP sentinel when queued for write; datetime once read back
    modified: Union[Sentinel, datetime]
class OwnedDataContainer(OwnerDataContainer):
    """Owner entry in the owned state; additionally carries the allocation tag."""

    tag: str
ResourceOwnersDict = dict[str, OwnerDataContainer]
class RetirableResourceException(Exception):
    """Base error for the retirable-resource manager."""
    pass
class ResourceDoesNotExist(RetirableResourceException):
    """Raised when the referenced resource document is missing."""
    pass
class ResourceNotAllocated(RetirableResourceException):
    """Raised when an operation requires an allocation that does not exist."""
    pass
class OwnerDoesNotExist(RetirableResourceException):
    """Raised when the named owner has no entry on the resource."""
    pass
class UpdateCommand(Protocol):
    """Structural interface for objects that contribute field updates."""

    def _update(self, data: dict[str, Any]) -> None:
        """Update the dict with field names and the updates to apply to them."""
class DeleteValue:
    """Update command that removes a single field from the owner data."""

    def __init__(self, key: str):
        self._key = key

    def _update(self, data: dict[str, Any]) -> None:
        # Map the field name to Firestore's delete sentinel.
        data[self._key] = DELETE_FIELD

    def __repr__(self):
        return "DeleteValue(" + self._key + ")"
class SetValue:
    """Update command that assigns `value` to the field `key`."""

    def __init__(self, key: str, value: Any):
        self._key = key
        self._value = value

    def _update(self, data: dict[str, Any]) -> None:
        data[self._key] = self._value

    def __repr__(self):
        return "SetValue({}, value: {})".format(self._key, type(self._value).__name__)
class AddToList:
    """Update command that unions `values` into the array field `key`."""

    def __init__(self, key: str, *values: Any):
        self._key = key
        self._values = values

    def _update(self, data: dict[str, Any]) -> None:
        data[self._key] = ArrayUnion(self._values)

    def __repr__(self):
        type_names = [type(item).__name__ for item in self._values]
        return "AddToList({}) with items {}".format(self._key, ",".join(type_names))
class _ResourceManagerBase:
def __init__(self, root_doc_path: Union[str, tuple[str], list[str]]):
if isinstance(root_doc_path, str):
root_path = root_doc_path.split("/")
elif isinstance(root_doc_path, (list, tuple)):
root_path = tuple(root_doc_path)
else:
raise TypeError(
"root_doc_path must be a str, list[str] or tuple[str]", root_doc_path
)
if not root_path or len(root_path) % 2 != 0:
raise ValueError("root path must be a valid document path", root_path)
self._root_doc_path: tuple[str] = tuple(root_path)
@property
def root_path(self):
return self._root_doc_path
@staticmethod
def _escape_field(name: str) -> str:
return get_field_path((name,))
def _child_path(self, *parts: str) -> tuple[str]:
return FieldPath(*self._root_doc_path + parts).parts
def _resource_path(self, resource: str) -> tuple[str]:
return self._child_path("resources", resource)
def _new_owner_data(self) -> dict[str, Any]:
return {
"state": _free_marker,
"data": {},
"modified": SERVER_TIMESTAMP,
}
class RetirableResourceManager(_ResourceManagerBase):
def __init__(
self, root_doc_path: Union[str, tuple[str], list[str]], *, client: Client
):
"""Initialize retirable resources manager, situated at `root_doc_path`,
using the provided Firestore client.
`root_doc_path` is either a slash-delimited string, or a sequence of path
segments, and must refer to a document location, not a collection location,
in firestore.
Raises a ValueError if `root_doc_path` does not have an even number of
path elements.
"""
super().__init__(root_doc_path)
self._client = client
def list_owners(self) -> list[str]:
"""Returns list of owners"""
return self._root_dict().get("owners", [])
    def set_owners(self, owners: list[str]) -> None:
        """Replace the owner list with `owners`.

        Every active resource document is updated in the same transaction:
        entries for removed owners are deleted, fresh free entries are
        created for new owners, and entries for owners present in both
        lists are untouched, so their tags are preserved.
        """

        @transactional
        def t_update(transaction: Transaction) -> None:
            previous_owners = self._owners(transaction=transaction)
            new_owners = set(owners)
            # per-resource field updates: delete removed owners, seed new ones
            update_spec = {
                **{k: DELETE_FIELD for k in previous_owners - new_owners},
                **{k: self._new_owner_data() for k in new_owners - previous_owners},
            }
            # NB: transactions require all reads before all writes
            active_resources = self._active_resources_list(transaction=transaction)
            transaction.set(self._root_docref(), {"owners": owners}, merge=True)
            for doc in active_resources:
                # TODO: does this mean we have a max of 500 resources here,
                # due to the limit on operations in one transaction?
                transaction.update(doc.reference, update_spec)

        t_update(self._client.transaction())
def take(self, owner: str, tag: str) -> Optional[str]:
"""Take an additional free resource for the owner"""
@transactional
def t_take(transaction: Transaction) -> Optional[str]:
resource = self._find_free_resource_for(owner, transaction=transaction)
if resource is None:
return None
else:
self._set_tag(resource, owner=owner, tag=tag, transaction=transaction)
return resource
return t_take(self._client.transaction())
def status(self, resource: str, owner: str) -> Literal["owned", "free", "retired"]:
"""Return the status of the resource as owned by `owner`
The status is "owner", "free", or "retired"
"""
owner_dict = self._owner_data_container(resource, owner)
state = owner_dict["state"]
if state == _owned_marker:
return "owned"
if state == _free_marker:
return "free"
if state == _retired_marker:
return "retired"
raise Exception("owner_dict in invalid state", owner_dict)
def dispose_all_resources(self) -> None:
"""Dispose all resources"""
for doc in self._resources_collection().stream():
cast(DocumentReference, doc.reference).delete()
def dispose_resource(self, resource: str) -> None:
"""Dispose a single resource"""
self._resource_docref(resource).delete()
def dispose(self) -> None:
"""Dispose everything"""
self.dispose_all_resources()
self._root_docref().delete()
def retire_resource(self, resource: str) -> None:
"""Retire the resource"""
@transactional
def t_retire_resource(transaction):
self._t_retire_resource(transaction, resource)
t_retire_resource(self._client.transaction())
    def retire(
        self, resource: str, owner: str
    ) -> Literal["resource retired", "resource active"]:
        """Retire `owner`'s interest in `resource`.

        If `owner` is the last remaining active owner, the whole resource
        is retired; otherwise only this owner's entry is marked retired and
        its tag removed. Returns which of the two happened.
        """

        @transactional
        def t_retire(
            transaction: Transaction,
        ) -> Literal["resource retired", "resource active"]:
            # Read inside the transaction so the "last active owner" check
            # and the subsequent write are atomic.
            if self._active_owners(resource, transaction=transaction) == {owner}:
                self._t_retire_resource(transaction, resource)
                return "resource retired"
            else:
                transaction.update(
                    self._resource_docref(resource),
                    {
                        FieldPath(owner, "state").to_api_repr(): _retired_marker,
                        FieldPath(owner, "tag").to_api_repr(): DELETE_FIELD,
                    },
                )
                return "resource active"

        return t_retire(self._client.transaction())
def free(self, resource: str, owner: str) -> None:
"""Free the resource for the owner"""
self._resource_docref(resource).update(
{
FieldPath(owner, "state").to_api_repr(): _free_marker,
FieldPath(owner, "tag").to_api_repr(): DELETE_FIELD,
}
)
def is_active(self, resource: str) -> Optional[bool]:
"""Returns True if active, False if retired, None if does not exist"""
data = self._resource_doc(resource).to_dict()
if data is None:
return None
return data[_active_field]
def add_resource(self, resource: str) -> Literal["ok", "already exists"]:
"""Add the resource, with a free tag for each owner.
Returns "ok" if the resource was created, or "already exists" if
the resource already exists.
"""
try:
self._resource_docref(resource).create(self._new_resource_data())
return "ok"
except Conflict:
return "already exists"
def resource_exists(self, resource: str) -> bool:
"""Return True if this resource exists.
The resource may be active or may be retired.
"""
return self._resource_doc(resource).exists
    def get_data(self, resource: str, owner: str) -> dict:
        """Return the per-owner data dict stored on `resource`.

        Raises ResourceDoesNotExist / OwnerDoesNotExist via the lookup.
        """
        return self._get_owner_data_container(resource, owner=owner)["data"]
    def update_data(
        self, resource: str, owner: str, *update_commands: UpdateCommand
    ) -> None:
        """Apply `update_commands` to the owner's data dict on `resource`.

        Each command contributes field-level updates (set / delete / array
        union); the owner's `modified` field is bumped to the server
        timestamp in the same write.

        Raises ResourceDoesNotExist when the resource document is missing.
        """
        # Let each command register its field -> value (or sentinel) pairs.
        updates = {}
        for command in update_commands:
            command._update(updates)
        docref = self._resource_docref(resource)
        try:
            docref.update(
                {
                    **{
                        FieldPath(owner, "data", key).to_api_repr(): value
                        for key, value in updates.items()
                    },
                    FieldPath(owner, "modified").to_api_repr(): SERVER_TIMESTAMP,
                }
            )
        except NotFound:
            raise ResourceDoesNotExist(resource)
def list_allocation(self, owner: str, tag: str) -> set[str]:
"""Returns set of resources allocated to the owner and tag"""
docs = (
self._resources_collection()
.where(
FieldPath(owner, "tag").to_api_repr(),
"==",
tag,
)
.select(())
.get()
)
return {doc.id for doc in docs}
    def request_allocation(self, owner: str, tag: str, qty: int) -> set[str]:
        """Adjust the allocation for (`owner`, `tag`) toward `qty` resources.

        Returns the resulting set of allocated resources, which may be
        smaller than `qty` when the free pool is exhausted.
        """
        allocated_resources = self.list_allocation(owner, tag)
        num_allocated = len(allocated_resources)
        # at most one of these is non-zero: grow toward qty or shrink to it
        num_to_take = max(0, qty - num_allocated)
        num_to_free = max(0, num_allocated - qty)
        if num_to_take:
            for _ in range(num_to_take):
                resource = self.take(owner, tag)
                if resource is None:
                    # free pool exhausted -- return what we managed to get
                    break
                allocated_resources.add(resource)
        if num_to_free:
            for _ in range(num_to_free):
                # pop() frees arbitrary members; callers must not rely on
                # which specific resources remain allocated
                self.free(allocated_resources.pop(), owner)
        return allocated_resources
def clear_allocation(self, owner: str) -> None:
"""Clear all allocations for the owner"""
for resource in self._resources_by_state(owner, _owned_marker):
self.free(resource, owner)
def free_allocation_count(self, owner: str) -> int:
"""How many resources are available to become allocated for owner
Caution: Because Firestore has no `count` operation, this operation may
be unexpectedly expensive.
"""
return len(self._resources_by_state(owner, _free_marker))
    def when_modified(self, resource: str, owner: str) -> datetime:
        """Return the last-modified timestamp of the owner's entry.

        NOTE(review): the stored value is written as SERVER_TIMESTAMP, so
        this is a datetime once read back from Firestore -- confirm callers
        never see the sentinel.
        """
        owner_dict = self._owner_data_container(resource, owner)
        return owner_dict["modified"]
    @staticmethod
    def _doc_owner_dict(doc: DocumentSnapshot, owner: str) -> OwnerDataContainer:
        """Extract the owner's container from a resource snapshot.

        Annotation fixed: this returns the owner's container dict, not a
        datetime as previously annotated.

        Raises a generic Exception when the owner key is absent (treated
        as document corruption).
        """
        owner_dict = doc.to_dict().get(owner)
        if owner_dict is None:
            raise Exception("invalid owner_dict for doc", doc.id)
        return owner_dict
def _owner_data_container(self, resource: str, owner: str) -> OwnerDataContainer:
owners_dict = self._resource_owners_dict(resource)
if owners_dict is None:
raise ResourceDoesNotExist(resource)
owner_dict = owners_dict.get(owner)
if owner_dict is None:
raise OwnerDoesNotExist(resource, owner)
return owner_dict
def _resources_by_state(self, owner: str, state: str) -> set[str]:
    """IDs of all resources whose state for *owner* equals *state*."""
    state_field = FieldPath(owner, "state").to_api_repr()
    # select(()) fetches no fields -- only document IDs are needed.
    query = (
        self._resources_collection()
        .where(state_field, "==", state)
        .select(())
    )
    return {doc.id for doc in query.get()}
def _get_owner_data_container(
    self, resource: str, *, owner: str, transaction: Optional[Transaction] = None
) -> OwnerDataContainer:
    """Return *owner*'s data container for *resource*.

    Raises ResourceDoesNotExist if the resource document is missing and
    OwnerDoesNotExist if the owner has no entry on it.
    """
    owners = self._resource_owners_dict(resource, transaction=transaction)
    if owners is None:
        raise ResourceDoesNotExist(resource)
    container = owners.get(owner)
    if container is None:
        raise OwnerDoesNotExist(resource, owner)
    return container
def _find_free_resource_for(
    self, owner: str, transaction: Optional[Transaction] = None
) -> Optional[str]:
    """Return the ID of one resource that is free for *owner*, or None.

    Which free resource is returned is arbitrary (Firestore picks the
    first match for the limited query).  The original had an empty
    docstring and tested emptiness via ``len(docs)``.
    """
    state_field = FieldPath(owner, "state").to_api_repr()
    result = (
        self._resources_collection()
        .where(state_field, "==", _free_marker)
        .limit(1)
        .get(transaction=transaction)
    )
    docs = list(result)
    return docs[0].id if docs else None
def _t_retire_resource(
    self,
    transaction: Transaction,
    resource: str,
) -> None:
    """Transactionally retire *resource*: mark the document inactive and
    every owner's state retired, deleting each owner's tag field."""
    snapshot = self._resource_doc(resource, transaction=transaction)
    if not snapshot.exists:
        raise ResourceDoesNotExist(resource)
    updates = {_active_field: False}
    for owner in snapshot.to_dict():
        if owner == _active_field:
            # Skip the bookkeeping field; every other key is an owner.
            continue
        updates[FieldPath(owner, "state").to_api_repr()] = _retired_marker
        updates[FieldPath(owner, "tag").to_api_repr()] = DELETE_FIELD
    transaction.update(self._resource_docref(resource), updates)
def _set_tag(
    self,
    resource: str,
    /,
    *,
    owner: str,
    tag: str,
    transaction: Optional[Transaction] = None,
) -> None:
    """Set the tag for this resource and mark it owned by *owner*.

    This is only called when the resource is free.

    The original dispatched through
    ``updatee = transaction if transaction else DocumentReference`` and
    relied on calling the unbound ``DocumentReference.update`` with the
    docref as ``self``; this spells the two cases out explicitly.
    """
    docref = self._resource_docref(resource)
    updates = {
        FieldPath(owner, "tag").to_api_repr(): tag,
        FieldPath(owner, "state").to_api_repr(): _owned_marker,
    }
    if transaction:
        transaction.update(docref, updates)
    else:
        docref.update(updates)
def _active_owners(
    self, resource: str, *, transaction: Optional[Transaction] = None
) -> set[str]:
    """Active owners are owners of a resource that are not retired.

    Returns the empty set when the resource document does not exist.
    """
    owners_dict = self._resource_owners_dict(resource, transaction=transaction)
    if owners_dict is None:
        return set()
    return {
        owner
        for owner, data in owners_dict.items()
        if data["state"] != _retired_marker
    }
def _resources_collection(self) -> CollectionReference:
    """The Firestore collection holding all resource documents."""
    path = self._child_path("resources")
    return self._client.collection(*path)
def _resource_owners_dict(
    self, resource: str, transaction: Optional[Transaction] = None
) -> Optional[ResourceOwnersDict]:
    """Returns a ResourceOwnersDict, or None if the resource does not exist.

    Keys are owners, as str.
    Values are `OwnerDataContainer` objects.
    """
    data = self._resource_doc(resource, transaction=transaction).to_dict()
    if data is None:
        return None
    # Strip the bookkeeping field.  pop() with a default tolerates a
    # document that (unexpectedly) lacks it, where the original
    # ``del data[_active_field]`` would raise KeyError.
    data.pop(_active_field, None)
    return data
def _resource_doc(
    self, resource: str, *, transaction: Optional[Transaction] = None
) -> DocumentSnapshot:
    """Fetch a snapshot of the resource's document."""
    docref = self._resource_docref(resource)
    return docref.get(transaction=transaction)
def _resource_docref(self, resource: str) -> DocumentReference:
    """Reference to the resource's Firestore document."""
    path = self._resource_path(resource)
    return self._client.document(*path)
def _root_docref(self) -> DocumentReference:
    """Reference to the manager's root Firestore document."""
    return self._client.document(*self._root_doc_path)
def _root_dict(
    self, *, transaction: Optional[Transaction] = None
) -> dict[str, Any]:
    """Contents of the root document; empty dict if it does not exist."""
    snapshot = self._root_docref().get(transaction=transaction)
    return snapshot.to_dict() or {}
def _active_resources_query(self) -> BaseQuery:
    """Query selecting all resource documents still marked active."""
    collection = self._resources_collection()
    return collection.where(_active_field, "==", True)
def _active_resources(self) -> Generator[DocumentSnapshot, Any, None]:
    """Lazily stream snapshots of all active resource documents."""
    query = self._active_resources_query()
    return query.stream()
def _active_resources_list(
    self, transaction: Optional[Transaction] = None
) -> Iterable[DocumentSnapshot]:
    """Fetch (non-streaming) all active resource documents."""
    query = self._active_resources_query()
    return query.get(transaction=transaction)
def _owners(self, *, transaction: Optional[Transaction] = None) -> set[str]:
    """The set of registered owners recorded on the root document."""
    root = self._root_dict(transaction=transaction)
    return set(root.get("owners", set()))
def _new_resource_data(self) -> dict[str, Any]:
    """Initial document payload for a newly registered resource."""
    return {
        # Bookkeeping flag: a new resource starts out active.
        _active_field: True,
        # One fresh per-owner data container for every registered owner.
        **{
            FieldPath(k).to_api_repr(): self._new_owner_data()
            for k in self._owners()
        },
} | /retirable_resources-0.1.9.tar.gz/retirable_resources-0.1.9/retirable_resources/resource_manager.py | 0.647018 | 0.157266 | resource_manager.py | pypi |
# http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
# Maps numeric HTTP status codes to the "<code> <reason phrase>" strings
# used in CGI/WSGI status lines.
http_status_codes = {
    # 1xx: Informational
    100: '100 Continue',
    101: '101 Switching Protocols',
    102: '102 Processing',
    # 2xx: Success
    200: '200 OK',
    201: '201 Created',
    202: '202 Accepted',
    203: '203 Non-Authoritative Information',
    204: '204 No Content',
    205: '205 Reset Content',
    206: '206 Partial Content',
    207: '207 Multi-Status',
    208: '208 Already Reported',
    226: '226 IM Used',
    # 3xx: Redirection
    300: '300 Multiple Choices',
    301: '301 Moved Permanently',
    302: '302 Found',
    303: '303 See Other',
    304: '304 Not Modified',
    305: '305 Use Proxy',
    306: '306 (Unused)',
    307: '307 Temporary Redirect',
    308: '308 Permanent Redirect',
    # 4xx: Client Error
    400: '400 Bad Request',
    401: '401 Unauthorized',
    402: '402 Payment Required',
    403: '403 Forbidden',
    404: '404 Not Found',
    405: '405 Method Not Allowed',
    406: '406 Not Acceptable',
    407: '407 Proxy Authentication Required',
    408: '408 Request Timeout',
    409: '409 Conflict',
    410: '410 Gone',
    411: '411 Length Required',
    412: '412 Precondition Failed',
    413: '413 Payload Too Large',
    414: '414 URI Too Long',
    415: '415 Unsupported Media Type',
    416: '416 Range Not Satisfiable',
    417: '417 Expectation Failed',
    421: '421 Misdirected Request',
    422: '422 Unprocessable Entity',
    423: '423 Locked',
    424: '424 Failed Dependency',
    426: '426 Upgrade Required',
    428: '428 Precondition Required',
    429: '429 Too Many Requests',
    431: '431 Request Header Fields Too Large',
    451: '451 Unavailable For Legal Reasons',
    # 5xx: Server Error
    500: '500 Internal Server Error',
    501: '501 Not Implemented',
    502: '502 Bad Gateway',
    503: '503 Service Unavailable',
    504: '504 Gateway Timeout',
    505: '505 HTTP Version Not Supported',
    506: '506 Variant Also Negotiates',
    507: '507 Insufficient Storage',
    508: '508 Loop Detected',
    510: '510 Not Extended',
    511: '511 Network Authentication Required',
} | /retort-cgi-1.0.1.tar.gz/retort-cgi-1.0.1/retort/data.py | 0.7011 | 0.25682 | data.py | pypi |
import functools
import logging
import numbers
import time
try:
    import pbr.version
except ImportError:
    # The version is only programmatically available in some contexts and you
    # must have pbr installed. Since we don't want to enforce that dependency
    # this may not work. Also, the version isn't available when vendoring.
    __version__ = None
else:
    __version__ = pbr.version.VersionInfo("retrace").version_string_with_vcs()

# Module-level logger shared by all retrace components.
_LOG = logging.getLogger("retrace")
class RetraceException(BaseException):
    """Root of the retrace exception hierarchy.

    Catch this to intercept anything retrace itself will ever raise.
    It deliberately derives from BaseException (not Exception) so that
    retrace's own control-flow exceptions are not swallowed by retried
    code configured with ``on_exception=Exception``.
    """
class LimitReached(RetraceException):
    """Raised by retrace limiters once the allowed attempts are exhausted."""
class _BaseAction(object):
"""
The base exception to be used by all custom intervals and limiters.
"""
def __str__(self):
return self.__class__.__name__
class Interval(_BaseAction):
    """Base interval: introduces no delay between attempts."""

    def delay(self, attempt_number):
        return None
class Sleep(Interval):
    """Interval that sleeps a fixed number of seconds between retries."""

    def __init__(self, delay):
        self._delay = delay

    def delay(self, attempt_number):
        _LOG.debug("Sleeping for %s seconds", self._delay)
        time.sleep(self._delay)
class Limit(_BaseAction):
    """Base limit: never stops retrying."""

    def attempt(self, attempt):
        return True
class Count(Limit):
    """Limit that allows at most ``max`` attempts before raising LimitReached."""

    def __init__(self, max):
        self.max = max

    def attempt(self, attempt_number):
        if attempt_number >= self.max:
            raise LimitReached()
class Validator(_BaseAction):
    """Base validator: accepts any result.

    The original defined a no-op ``__init__`` that only passed; the
    inherited ``object.__init__`` is identical, so it has been removed.
    """

    def validate(self, result):
        return True
class Match(Validator):
    """Validator that accepts a result only if it equals the expected value."""

    def __init__(self, value):
        self._value = value

    def validate(self, result):
        return result == self._value
class Fn(_BaseAction):
    """Adapter letting a plain callable act as interval, limiter or validator.

    The wrapped callable is invoked for whichever role the Fn is used in.
    """

    def __init__(self, fn):
        self.fn = fn

    def delay(self, attempt_number):
        return self.fn(attempt_number)

    def attempt(self, attempt_number):
        return self.fn(attempt_number)

    def validate(self, result):
        return self.fn(result)
def retry(*dargs, **dkwargs):
    """
    The retry decorator. Accepts all the arguments of Retry.__init__.

    Supports both the bare ``@retry`` form and the parameterized
    ``@retry(...)`` form.
    """
    if len(dargs) == 1 and callable(dargs[0]):
        # Bare @retry: the single positional argument is the function itself.
        fn = dargs[0]

        @functools.wraps(fn)
        def wrapped(*args, **kw):
            return Retry()(fn, *args, **kw)

        return wrapped

    # Parameterized @retry(...): return a decorator closing over the args.
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kw):
            return Retry(*dargs, **dkwargs)(f, *args, **kw)

        return wrapped_f

    return wrap
class Retry(object):
    """The Retry decorator class.

    This class handles the retry process, calling either limiter or
    interval objects which control the retry flow.
    """

    def __init__(self, on_exception=Exception, limit=5, interval=None, validator=None):
        """Configure how a function should be retried.

        Args:
            on_exception (BaseException): The exception (and its subclasses)
                to catch and retry on.
            limit: Maximum attempts. May be an int, a callable, a Limit
                instance, or None for unlimited attempts.
            interval: Delay between attempts. May be a number of seconds,
                a callable, an Interval instance, or None for no delay.
            validator: Check applied to the return value. May be a callable,
                a Validator instance, or a plain value to compare against.
        """
        self.attempts = 0
        self._on_exception = on_exception
        self._setup_limit(limit)
        self._setup_interval(interval)
        self._setup_validator(validator)

    def _setup_limit(self, limit):
        # Normalize `limit` into a Limit-like object.
        if limit is None:
            self._limit = Limit()
        elif isinstance(limit, numbers.Number):
            self._limit = Count(limit)
        elif callable(limit) and not isinstance(limit, Fn):
            self._limit = Fn(limit)
        else:
            self._limit = limit
        if limit is not None:
            _LOG.debug("Adding limiter '%s' to decorator", self._nice_name(self._limit))

    def _setup_interval(self, interval):
        # Normalize `interval` into an Interval-like object.
        if interval is None:
            self._interval = Interval()
        elif isinstance(interval, numbers.Number):
            self._interval = Sleep(interval)
        elif callable(interval) and not isinstance(interval, Fn):
            self._interval = Fn(interval)
        else:
            self._interval = interval
        if interval is not None:
            _LOG.debug(
                "Adding interval '%s' to decorator", self._nice_name(self._interval)
            )

    def _setup_validator(self, validator):
        # Normalize `validator`: callables are wrapped, any other non-None
        # value is compared for equality against the result.
        if validator is None:
            self._validator = Validator()
        elif callable(validator):
            self._validator = Fn(validator)
        else:
            self._validator = Match(value=validator)
        if validator is not None:
            _LOG.debug(
                "Adding validator '%s' to decorator", self._nice_name(self._validator)
            )

    def _nice_name(self, thing):
        # NOTE(review): getattr without a default raises AttributeError for
        # objects lacking __module__ (e.g. functools.partial) -- confirm
        # whether that can reach here.
        mod = getattr(thing, "__module__")
        name = getattr(thing, "__name__", str(thing))
        return "{}.{}".format(mod, name)

    def _limit_reached(self):
        try:
            # On a failure, the attempt call decides if we should try
            # again. It should raise a LimitReached if we should stop.
            self._limit.attempt(self.attempts)
            return False
        except LimitReached:
            return True

    def __call__(self, fn, *args, **kwargs):
        fn_name = self._nice_name(fn)
        while True:
            self.attempts += 1
            try:
                _LOG.debug("Calling %s. Attempt %s", fn_name, self.attempts)
                result = fn(*args, **kwargs)
            except self._on_exception as e:
                # Never swallow retrace's own control-flow exceptions.
                if isinstance(e, RetraceException):
                    raise
                if self._limit_reached():
                    raise
                _LOG.exception("Caught exception when calling %s", fn_name)
            else:
                valid = self._validator.validate(result)
                if valid:
                    return result
                if self._limit_reached():
                    raise LimitReached("Validator failed and the limit was reached")
            # Call delay, it should block for however long we should delay
            # before trying again.
self._interval.delay(self.attempts) | /retrace-3.0.0.tar.gz/retrace-3.0.0/src/retrace.py | 0.59749 | 0.234472 | retrace.py | pypi |
<p align="center">
<a href="https://github.com/gabrielguarisa/retrack"><img src="https://raw.githubusercontent.com/gabrielguarisa/retrack/main/logo.png" alt="retrack"></a>
</p>
<p align="center">
<em>A business rules engine</em>
</p>
<div align="center">
[](https://pypi.org/project/retrack/)
[](https://github.com/psf/black)
[](https://github.com/gabrielguarisa/retrack/releases)
[](https://github.com/gabrielguarisa/retrack/blob/main/LICENSE)
</div>
## Installation
```bash
pip install retrack
```
## Usage
```python
import retrack
runner = retrack.Runner.from_json("your-rule.json")
response = runner.execute(input_data)
```
Or, if you want to create the parser and runner manually:
```python
import retrack
# Parse the rule/model
parser = retrack.Parser(rule)
# Create a runner
runner = retrack.Runner(parser)
# Run the rule/model passing the data
runner.execute(data)
```
The `Parser` class parses the rule/model and creates a graph of nodes. The `Runner` class runs the rule/model using the data passed to the runner. The `data` is a dictionary or a list of dictionaries containing the data that will be used to evaluate the conditions and execute the actions. To see which data is required for the given rule/model, check the `runner.request_model` property that is a pydantic model used to validate the data.
### Creating a rule/model
A rule is a set of conditions and actions that are executed when the conditions are met. The conditions are evaluated using the data passed to the runner. The actions are executed when the conditions are met.
Each rule is composed of many nodes. To see each node type, check the [nodes](https://github.com/gabrielguarisa/retrack/tree/main/retrack/nodes) folder.
To create a rule, you need to create a JSON file with the following structure:
```json
{
"nodes": {
"node id": {
"id": "node id",
"data": {},
"inputs": {},
"outputs": {},
"name": "node name",
},
// ... more nodes
}
}
```
The `nodes` key is a dictionary of nodes. Each node has the following properties:
- `id`: The node id. This is used to reference the node in the `inputs` and `outputs` properties.
- `data`: The node data. This is used as a metadata for the node.
- `inputs`: The node inputs. This is used to reference the node inputs.
- `outputs`: The node outputs. This is used to reference the node outputs.
- `name`: The node name. This is used to define the node type.
The `inputs` and `outputs` properties are dictionaries of node connections. Each connection has the following properties:
- `node`: The node id that is connected to the current node.
- `input`: The input name of the connection that is connected to the current node. This is only used in the `inputs` property.
- `output`: The output name of the connection that is connected to the current node. This is only used in the `outputs` property.
To see some examples, check the [examples](https://github.com/gabrielguarisa/retrack/tree/main/examples) folder.
### Creating a custom node
To create a custom node, you need to create a class that inherits from the `BaseNode` class. Each node is a pydantic model, so you can use pydantic features to create your custom node. To see the available features, check the [pydantic documentation](https://pydantic-docs.helpmanual.io/).
To create a custom node you need to define the inputs and outputs of the node. To do this, you need to define the `inputs` and `outputs` class attributes. Let's see an example of a custom node that has two inputs, sum them and return the result:
```python
import retrack
import pydantic
import pandas as pd
import typing
class SumInputsModel(pydantic.BaseModel):
input_value_0: retrack.InputConnectionModel
input_value_1: retrack.InputConnectionModel
class SumOutputsModel(pydantic.BaseModel):
output_value: retrack.OutputConnectionModel
class SumNode(retrack.BaseNode):
inputs: SumInputsModel
outputs: SumOutputsModel
def run(self, input_value_0: pd.Series,
input_value_1: pd.Series,
) -> typing.Dict[str, pd.Series]:
output_value = input_value_0.astype(float) + input_value_1.astype(float)
return {
"output_value": output_value,
}
```
After creating the custom node, you need to register it in the nodes registry and pass the registry to the parser. Let's see an example:
```python
import retrack
# Register the custom node
retrack.component_registry.register_node("sum", SumNode)
# Parse the rule/model
parser = retrack.Parser(rule, component_registry=retrack.component_registry)
```
## Contributing
Contributions are welcome! Please read the [contributing guidelines](https://github.com/gabrielguarisa/retrack/tree/main/CONTRIBUTING.md) first. | /retrack-0.8.1.tar.gz/retrack-0.8.1/README.md | 0.538255 | 0.975343 | README.md | pypi |
# retrie
[](https://github.com/ddelange/retrie/actions?query=branch%3Amaster)
[](https://codecov.io/gh/ddelange/retrie)
[](https://pypi.org/project/retrie/)
[](https://pypi.org/project/retrie/)
[](https://pypistats.org/packages/retrie)
[](https://github.com/python/black)
[retrie](https://github.com/ddelange/retrie) offers fast methods to match and replace (sequences of) strings based on efficient Trie-based regex unions.
#### Trie
Instead of matching against a simple regex union, which becomes slow for large sets of words, a more efficient regex pattern can be compiled using a [Trie](https://en.wikipedia.org/wiki/Trie) structure:
```py
from retrie.trie import Trie
trie = Trie()
trie.add("abc", "foo", "abs")
assert trie.pattern() == "(?:ab[cs]|foo)" # equivalent to but faster than "(?:abc|abs|foo)"
trie.add("absolute")
assert trie.pattern() == "(?:ab(?:c|s(?:olute)?)|foo)"
trie.add("abx")
assert trie.pattern() == "(?:ab(?:[cx]|s(?:olute)?)|foo)"
trie.add("abxy")
assert trie.pattern() == "(?:ab(?:c|s(?:olute)?|xy?)|foo)"
```
## Installation
This pure-Python, OS independent package is available on [PyPI](https://pypi.org/project/retrie):
```sh
$ pip install retrie
```
## Usage
[](https://retrie.readthedocs.io)
For documentation, see [retrie.readthedocs.io](https://retrie.readthedocs.io/en/stable/_code_reference/retrie.html).
The following objects are all subclasses of `retrie.retrie.Retrie`, which handles filling the Trie and compiling the corresponding regex pattern.
#### Blacklist
The `Blacklist` object can be used to filter out bad occurrences in a text or a sequence of strings:
```py
from retrie.retrie import Blacklist
# check out docstrings and methods
help(Blacklist)
blacklist = Blacklist(["abc", "foo", "abs"], match_substrings=False)
blacklist.compiled
# re.compile(r'(?<=\b)(?:ab[cs]|foo)(?=\b)', re.IGNORECASE|re.UNICODE)
assert not blacklist.is_blacklisted("a foobar")
assert tuple(blacklist.filter(("good", "abc", "foobar"))) == ("good", "foobar")
assert blacklist.cleanse_text(("good abc foobar")) == "good foobar"
blacklist = Blacklist(["abc", "foo", "abs"], match_substrings=True)
blacklist.compiled
# re.compile(r'(?:ab[cs]|foo)', re.IGNORECASE|re.UNICODE)
assert blacklist.is_blacklisted("a foobar")
assert tuple(blacklist.filter(("good", "abc", "foobar"))) == ("good",)
assert blacklist.cleanse_text(("good abc foobar")) == "good bar"
```
#### Whitelist
Similar methods are available for the `Whitelist` object:
```py
from retrie.retrie import Whitelist
# check out docstrings and methods
help(Whitelist)
whitelist = Whitelist(["abc", "foo", "abs"], match_substrings=False)
whitelist.compiled
# re.compile(r'(?<=\b)(?:ab[cs]|foo)(?=\b)', re.IGNORECASE|re.UNICODE)
assert not whitelist.is_whitelisted("a foobar")
assert tuple(whitelist.filter(("bad", "abc", "foobar"))) == ("abc",)
assert whitelist.cleanse_text(("bad abc foobar")) == "abc"
whitelist = Whitelist(["abc", "foo", "abs"], match_substrings=True)
whitelist.compiled
# re.compile(r'(?:ab[cs]|foo)', re.IGNORECASE|re.UNICODE)
assert whitelist.is_whitelisted("a foobar")
assert tuple(whitelist.filter(("bad", "abc", "foobar"))) == ("abc", "foobar")
assert whitelist.cleanse_text(("bad abc foobar")) == "abcfoo"
```
#### Replacer
The `Replacer` object does a fast single-pass search & replace for occurrences of `replacement_mapping.keys()` with corresponding values.
```py
from retrie.retrie import Replacer
# check out docstrings and methods
help(Replacer)
replacement_mapping = dict(zip(["abc", "foo", "abs"], ["new1", "new2", "new3"]))
replacer = Replacer(replacement_mapping, match_substrings=True)
replacer.compiled
# re.compile(r'(?:ab[cs]|foo)', re.IGNORECASE|re.UNICODE)
assert replacer.replace("ABS ...foo... foobar") == "new3 ...new2... new2bar"
replacer = Replacer(replacement_mapping, match_substrings=False)
replacer.compiled
# re.compile(r'\b(?:ab[cs]|foo)\b', re.IGNORECASE|re.UNICODE)
assert replacer.replace("ABS ...foo... foobar") == "new3 ...new2... foobar"
replacer = Replacer(replacement_mapping, match_substrings=False, re_flags=None)
replacer.compiled # on py3, re.UNICODE is always enabled
# re.compile(r'\b(?:ab[cs]|foo)\b')
assert replacer.replace("ABS ...foo... foobar") == "ABS ...new2... foobar"
replacer = Replacer(replacement_mapping, match_substrings=False, word_boundary=" ")
replacer.compiled
# re.compile(r'(?<= )(?:ab[cs]|foo)(?= )', re.IGNORECASE|re.UNICODE)
assert replacer.replace(". ABS ...foo... foobar") == ". new3 ...foo... foobar"
```
## Development
[](https://github.com/carloscuesta/gitmoji-cli)
[](https://github.com/pre-commit/pre-commit)
Run `make help` for options like installing for development, linting and testing.
| /retrie-0.2.3.tar.gz/retrie-0.2.3/README.md | 0.620162 | 0.898767 | README.md | pypi |
# The observability actor should store all data, but only expose a subset of it by default.
"""
What do we want to see?
- What actions are being taken by each actor
- See the path of messages through the system
"""
import json
from typing import Type, Optional
from types import TracebackType
import termcolor
import tqdm
from retriever_research.shared_memory import SharedMemory
from retriever_research import messages
from retriever_research.ticker import Ticker
from retriever_research.config import Config, LogLevels
from retriever_research.actors.pykka_extensions.custom_actor import RetrieverThreadingActor
def print_log_line(log: messages.LogMessage):
    """Echo a log message to stdout in a human-readable one-line form."""
    timestamp = log.timestamp.strftime("%H:%M:%S.%f")
    print(f"{timestamp} - [{log.actor}] {log.log}")
class LoggingActor(RetrieverThreadingActor):
    """Central sink for log messages and progress-bar events.

    Receives LogMessage / Progress* messages, appends structured JSON log
    lines to a file, echoes INFO/ERROR lines to stdout, and drives a
    tqdm progress bar.
    """

    # This actor must not reference any other actors. It is created before any others.
    can_log = False  # The logging actor must not log through itself.

    def __init__(self, output_file="retriever.log"):
        super().__init__(urn=Config.LOGGING_ACTOR_URN)
        self.output_file = output_file
        # Opened in "w" mode: each run truncates the previous log file.
        self.log_fileobj = open(self.output_file, "w")
        self.ignore_actor_list = []
        self.progress_bar = None  # type: Optional[tqdm.tqdm]
        # Progress ticks received before the bar exists are buffered here.
        self.progress_update = 0

    def on_receive(self, msg):
        # NOTE(review): the nesting of the branches below was reconstructed
        # from mangled indentation -- confirm against the original file.
        try:
            assert isinstance(msg, (messages.LogMessage, messages.ProgressUpdateMessage, messages.ProgressInitMessage, messages.CloseProgressBar))
            if isinstance(msg, messages.CloseProgressBar):
                if self.progress_bar is not None:
                    self.progress_bar.close()
                    self.progress_bar = None
                    # TQDM might not end with a newline
                    print()
            if isinstance(msg, messages.ProgressInitMessage):
                self.progress_bar = tqdm.tqdm(total=msg.total_files)
                # Flush any ticks buffered before the bar was created.
                self.progress_bar.update(self.progress_update)
                self.progress_update = 0
            if isinstance(msg, messages.ProgressUpdateMessage):
                self.progress_update += 1
                if self.progress_bar is not None:
                    self.progress_bar.update(self.progress_update)
                    self.progress_update = 0
            if isinstance(msg, messages.LogMessage):
                # Persist every message as one JSON object per line.
                structured_log = dict(
                    level=msg.level,
                    log=msg.log,
                    actor=msg.actor,
                    timestamp=msg.timestamp.isoformat(),
                )
                if msg.log_id:
                    structured_log["log_id"] = msg.log_id
                if msg.tags and len(msg.tags) > 0:
                    structured_log["tags"] = msg.tags
                log_line = json.dumps(structured_log)
                self.log_fileobj.write(f"{log_line}\n")
                self.log_fileobj.flush()
                # Only surface INFO/ERROR on stdout; other levels stay file-only.
                if structured_log["level"] in [LogLevels.INFO, LogLevels.ERROR]:
                    print_log_line(msg)
        except Exception as e:
            termcolor.cprint(f"LoggingActor error during on_receive: {e}", color='red')
            raise e

    def _cleanup(self):
        # Shared teardown for both the failure and normal-stop paths.
        if self.progress_bar is not None:
            self.progress_bar.close()

    def on_failure(
        self,
        exception_type: Type[BaseException],
        exception_value: BaseException,
        traceback: TracebackType,
    ) -> None:
        # The actor crashed: make sure the progress bar is closed.
        self._cleanup()

    def on_stop(self) -> None:
        """Normal-shutdown hook: close the progress bar."""
self._cleanup() | /retriever_research-0.0.5.tar.gz/retriever_research-0.0.5/retriever_research/logging.py | 0.733738 | 0.326889 | logging.py | pypi |
import time
from datetime import datetime, timezone
import psutil
from typing import List, Tuple
# Decimal (SI) unit factors used to convert raw byte counters to GB / MB.
GIGA = 1_000_000_000
MEGA = 1_000_000
class ThroughputTracker:
    """Computes the rate of change of a cumulatively sampled value.

    Each call to add_measurement() returns the per-second rate since the
    previous call, scaled by ``multiplier`` (used to convert units, e.g.
    bytes/s -> Gbit/s).
    """

    def __init__(self, name: str, multiplier: float = 1.0):
        self.name = name
        self.multiplier = multiplier  # Adjust output unit
        self.last_measured_time = time.time()
        self.last_measured_val = 0.0

    def add_measurement(self, new_val: float, log=False) -> float:
        """
        Add a new value and return the throughput since the last measurement.

        The measurement from the first call is meaningless since the
        starting value is arbitrarily set to 0. Returns 0.0 if no time has
        elapsed since the previous call (the original divided by the
        elapsed time unconditionally and could raise ZeroDivisionError).
        """
        now = time.time()
        elapsed = now - self.last_measured_time
        delta = new_val - self.last_measured_val
        prev_val = self.last_measured_val
        self.last_measured_time = now
        self.last_measured_val = new_val
        if elapsed <= 0.0:
            return 0.0
        throughput = (delta / elapsed) * self.multiplier
        if log:
            print(new_val, prev_val, elapsed, delta, throughput)
        return throughput
class SimpleCpuUtilCollector:
    """Samples aggregate CPU utilization (all cores combined)."""

    @staticmethod
    def sample() -> float:
        # System-wide CPU utilization percentage since the previous call.
        return psutil.cpu_percent()
class DetailedCpuUtilCollector:
    """Samples CPU utilization broken down per core."""

    @staticmethod
    def sample() -> List[float]:
        """Return per-core CPU utilization percentages."""
        return psutil.cpu_percent(percpu=True)
class FreeMemoryCollector:
    """Samples available system memory."""

    @staticmethod
    def sample() -> float:
        """Return available virtual memory in gigabytes."""
        return psutil.virtual_memory().available / GIGA
class ProcInfoCollector:
    """Samples memory usage and process count for this process tree."""

    @staticmethod
    def sample(log_access_denied=True) -> Tuple[float, int]:
        """Return (memory_used_megabytes, process_count) for this process
        and all of its descendants.

        Children that vanish mid-iteration are skipped; children whose
        info is inaccessible are skipped and optionally logged.
        """
        this_process = psutil.Process()
        proc_count = 1
        proc_mem_used = this_process.memory_info().rss
        for child in this_process.children(recursive=True):
            try:
                # Read memory first so a failing child contributes to
                # neither the count nor the total (the original bumped
                # proc_count before memory_info() could raise, skewing
                # the count relative to the memory figure).
                rss = child.memory_info().rss
            except psutil.NoSuchProcess:
                continue
            except psutil.AccessDenied:
                if log_access_denied:
                    print(f"[Profiler.ProcInfoCollector] AccessDenied when retrieving process info for child ({child}). Ignoring error.")
                continue
            proc_count += 1
            proc_mem_used += rss
        return proc_mem_used / MEGA, proc_count
class NetThroughputCollector:
    """Tracks network send/receive throughput between samples."""

    def __init__(self) -> None:
        # The bytes -> gigabits conversion is baked into the trackers.
        self.sent_throughput = ThroughputTracker("Network Sent (Gbit/s)", multiplier=8 / GIGA)
        self.recv_throughput = ThroughputTracker("Network Recv (Gbit/s)", multiplier=8 / GIGA)
        # Prime the trackers: the very first measurement has no prior
        # reference point, so its value would be meaningless.
        counters = psutil.net_io_counters()
        self.sent_throughput.add_measurement(counters.bytes_sent)
        self.recv_throughput.add_measurement(counters.bytes_recv)

    def sample(self) -> Tuple[float, float]:
        """Return (sent, received) throughput in Gbit/s since the last sample."""
        counters = psutil.net_io_counters()
        sent = self.sent_throughput.add_measurement(counters.bytes_sent)
        recv = self.recv_throughput.add_measurement(counters.bytes_recv)
        return sent, recv
class DiskReadWriteRateCollector:
    """Tracks disk read/write throughput (MB/s) and IOPS between samples."""

    def __init__(self) -> None:
        self.read_throughput_tracker = ThroughputTracker("Disk Read (Megabytes/s)", multiplier=1/MEGA)
        self.write_throughput_tracker = ThroughputTracker("Disk Write (Megabytes/s)", multiplier=1/MEGA)
        self.iops = ThroughputTracker("Disk IOPS")
        # Prime the trackers: the very first measurement has no prior
        # reference point, so its value would be meaningless.
        disk = psutil.disk_io_counters()
        self.read_throughput_tracker.add_measurement(disk.read_bytes)
        self.write_throughput_tracker.add_measurement(disk.write_bytes)
        self.iops.add_measurement(disk.read_count + disk.write_count)

    def sample(self) -> Tuple[float, float, float]:
        """Return tuple of (Read, Write, IOPS) values"""
        disk = psutil.disk_io_counters()
        read_throughput = self.read_throughput_tracker.add_measurement(disk.read_bytes)
        write_throughput = self.write_throughput_tracker.add_measurement(disk.write_bytes)
        # IOPS counts read and write operations combined.
        iops = self.iops.add_measurement(disk.read_count + disk.write_count, log=False)
return read_throughput, write_throughput, iops | /retriever_research-0.0.5.tar.gz/retriever_research-0.0.5/retriever_research/profiler/collectors.py | 0.75401 | 0.419529 | collectors.py | pypi |

[](https://github.com/weecology/retriever/actions/workflows/python-package.yml)
[](https://ci.appveyor.com/project/ethanwhite/retriever/branch/main)
[](http://depsy.org/package/python/retriever)
[](https://codecov.io/github/weecology/retriever?branch=main)
[](http://retriever.readthedocs.io/en/latest/?badge=latest)
[](https://raw.githubusercontent.com/weecology/retriever/main/LICENSE)
[](https://gitter.im/weecology/retriever?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://doi.org/10.5281/zenodo.1038272)
[](https://doi.org/10.21105/joss.00451)
[](https://anaconda.org/conda-forge/retriever)
[](https://anaconda.org/conda-forge/retriever)
[](https://pypi.python.org/pypi/retriever)
<a href="https://numfocus.org/sponsored-projects">
<img alt="NumFOCUS"
src="https://i0.wp.com/numfocus.org/wp-content/uploads/2019/06/AffiliatedProject.png" width="100" height="18">
</a>
Finding data is one thing. Getting it ready for analysis is another. Acquiring,
cleaning, standardizing and importing publicly available data is time consuming
because many datasets lack machine readable metadata and do not conform to
established data structures and formats. The Data Retriever automates the first
steps in the data analysis pipeline by downloading, cleaning, and standardizing
datasets, and importing them into relational databases, flat files, or
programming languages. The automation of this process reduces the time for a
user to get most large datasets up and running by hours, and in some cases days.
## Installing the Current Release
If you have Python installed you can install the current release using either `pip`:
```bash
pip install retriever
```
or `conda` after adding the `conda-forge` channel (`conda config --add channels conda-forge`):
```bash
conda install retriever
```
Depending on your system configuration this may require `sudo` for `pip`:
```bash
sudo pip install retriever
```
Precompiled binary installers are also available for Windows, OS X, and
Ubuntu/Debian on
the [releases page](https://github.com/weecology/retriever/releases). These do
not require a Python installation.
[List of Available Datasets](https://retriever.readthedocs.io/en/latest/datasets_list.html)
----------------------------
Installing From Source
----------------------
To install the Data Retriever from source, you'll need Python 3.6.8+ with the following packages installed:
* xlrd
The following packages are optionally needed to interact with associated
database management systems:
* PyMySQL (for MySQL)
* sqlite3 (for SQLite)
* psycopg2-binary (for PostgreSQL), previously psycopg2.
* pyodbc (for MS Access - this option is only available on Windows)
* Microsoft Access Driver (ODBC for windows)
### To install from source
Either use `pip` to install directly from GitHub:
```shell
pip install git+https://git@github.com/weecology/retriever.git
```
or:
1. Clone the repository
2. From the directory containing setup.py, run the following command: `pip
install .`. You may need to include `sudo` at the beginning of the
command depending on your system (i.e., `sudo pip install .`).
More extensive documentation for those that are interested in developing can be found [here](http://retriever.readthedocs.io/en/latest/?badge=latest)
Using the Command Line
----------------------
After installing, run `retriever update` to download all of the available dataset scripts.
To see the full list of command line options and datasets run `retriever --help`.
The output will look like this:
```shell
usage: retriever [-h] [-v] [-q]
{download,install,defaults,update,new,new_json,edit_json,delete_json,ls,citation,reset,help}
...
positional arguments:
{download,install,defaults,update,new,new_json,edit_json,delete_json,ls,citation,reset,help}
sub-command help
download download raw data files for a dataset
install download and install dataset
defaults displays default options
update download updated versions of scripts
new create a new sample retriever script
new_json CLI to create retriever datapackage.json script
edit_json CLI to edit retriever datapackage.json script
delete_json CLI to remove retriever datapackage.json script
ls display a list all available dataset scripts
citation view citation
reset reset retriever: removes configuration settings,
scripts, and cached data
help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-q, --quiet suppress command-line output
```
To install datasets, use `retriever install`:
```shell
usage: retriever install [-h] [--compile] [--debug]
{mysql,postgres,sqlite,msaccess,csv,json,xml} ...
positional arguments:
{mysql,postgres,sqlite,msaccess,csv,json,xml}
engine-specific help
mysql MySQL
postgres PostgreSQL
sqlite SQLite
msaccess Microsoft Access
csv CSV
json JSON
xml XML
optional arguments:
-h, --help show this help message and exit
--compile force re-compile of script before downloading
--debug run in debug mode
```
### Examples
These examples are using the [*Iris* flower dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set).
More examples can be found in the Data Retriever documentation.
Using Install
```shell
retriever install -h (gives install options)
```
Using specific database engine, retriever install {Engine}
```shell
retriever install mysql -h (gives install mysql options)
retriever install mysql --user myuser --password ******** --host localhost --port 8888 --database_name testdbase iris
```
To install data into an SQLite database named iris.db, you would use:
```shell
retriever install sqlite iris -f iris.db
```
Using download
```shell
retriever download -h (gives you help options)
retriever download iris
retriever download iris --path C:\Users\Documents
```
Using citation
```shell
retriever citation (citation of the retriever engine)
retriever citation iris (citation for the iris data)
```
Spatial Dataset Installation
----------------------------
**Set up Spatial support**
To set up spatial support for Postgres using Postgis please
refer to the [spatial set-up docs](https://retriever.readthedocs.io/en/latest/spatial_dbms.html).
```shell
retriever install postgres harvard-forest # Vector data
retriever install postgres bioclim # Raster data
# Install only the data of USGS elevation in the given extent
retriever install postgres usgs-elevation -b -94.98704597353938 39.027001800158615 -94.3599408119917 40.69577051867074
```
Website
-------
For more information see the
[Data Retriever website](http://www.data-retriever.org/).
Acknowledgments
---------------
Development of this software was funded by the [Gordon and Betty Moore
Foundation's Data-Driven Discovery
Initiative](https://www.moore.org/initiative-strategy-detail?initiativeId=data-driven-discovery) through
[Grant GBMF4563](http://www.moore.org/grants/list/GBMF4563) to Ethan White and
the [National Science Foundation](http://nsf.gov/) as part of a [CAREER award to
Ethan White](http://nsf.gov/awardsearch/showAward.do?AwardNumber=0953694).
| /retriever-3.1.0.tar.gz/retriever-3.1.0/README.md | 0.46563 | 0.889864 | README.md | pypi |
<div align="center">
<img src="https://repository-images.githubusercontent.com/566840861/ce7eeed0-7454-4aff-9073-235a83eeb6e7">
</div>
<p align="center">
<!-- Python -->
<a href="https://www.python.org" alt="Python">
<img src="https://badges.aleen42.com/src/python.svg" />
</a>
<!-- Version -->
<a href="https://badge.fury.io/py/retriv"><img src="https://badge.fury.io/py/retriv.svg" alt="PyPI version" height="18"></a>
<!-- Docs -->
<!-- <a href="https://amenra.github.io/retriv"><img src="https://img.shields.io/badge/docs-passing-<COLOR>.svg" alt="Documentation Status"></a> -->
<!-- Black -->
<a href="https://github.com/psf/black" alt="Code style: black">
<img src="https://img.shields.io/badge/code%20style-black-000000.svg" />
</a>
<!-- License -->
<a href="https://lbesson.mit-license.org/"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License: MIT"></a>
<!-- Google Colab -->
<!-- <a href="https://colab.research.google.com/github/AmenRa/retriv/blob/master/notebooks/1_overview.ipynb"> -->
<!-- <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> -->
</a>
</p>
## 🔥 News
- [August 23, 2023] `retriv` 0.2.2 is out!
This release adds _experimental_ support for multi-field documents and filters.
Please, refer to [Advanced Retriever](https://github.com/AmenRa/retriv/blob/main/docs/advanced_retriever.md) documentation.
- [February 18, 2023] `retriv` 0.2.0 is out!
This release adds support for Dense and Hybrid Retrieval.
Dense Retrieval leverages the semantic similarity of the queries' and documents' vector representations, which can be computed directly by `retriv` or imported from other sources.
Hybrid Retrieval mixes traditional retrieval, informally called Sparse Retrieval, and Dense Retrieval results to further improve retrieval effectiveness.
As the library was almost completely redone, indices built with previous versions are no longer supported.
## ⚡️ Introduction
[retriv](https://github.com/AmenRa/retriv) is a user-friendly and efficient [search engine](https://en.wikipedia.org/wiki/Search_engine) implemented in [Python](https://en.wikipedia.org/wiki/Python_(programming_language)) supporting Sparse (traditional search with [BM25](https://en.wikipedia.org/wiki/Okapi_BM25), [TF-IDF](https://en.wikipedia.org/wiki/Tf–idf)), Dense ([semantic search](https://en.wikipedia.org/wiki/Semantic_search)) and Hybrid retrieval (a mix of Sparse and Dense Retrieval).
It allows you to build a search engine in a __single line of code__.
[retriv](https://github.com/AmenRa/retriv) is built upon [Numba](https://github.com/numba/numba) for high-speed [vector operations](https://en.wikipedia.org/wiki/Automatic_vectorization) and [automatic parallelization](https://en.wikipedia.org/wiki/Automatic_parallelization), [PyTorch](https://pytorch.org) and [Transformers](https://huggingface.co/docs/transformers/index) for easy access and usage of [Transformer-based Language Models](https://web.stanford.edu/~jurafsky/slp3/10.pdf), and [Faiss](https://github.com/facebookresearch/faiss) for approximate [nearest neighbor search](https://en.wikipedia.org/wiki/Nearest_neighbor_search).
In addition, it provides automatic tuning functionalities to allow you to tune its internal components with minimal intervention.
## ✨ Main Features
### Retrievers
- [Sparse Retriever](https://github.com/AmenRa/retriv/blob/main/docs/sparse_retriever.md): standard searcher based on lexical matching.
[retriv](https://github.com/AmenRa/retriv) implements [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) as its main retrieval model.
[TF-IDF](https://en.wikipedia.org/wiki/Tf–idf) is also supported for educational purposes.
The sparse retriever comes armed with multiple [stemmers](https://en.wikipedia.org/wiki/Stemming), [tokenizers](https://en.wikipedia.org/wiki/Lexical_analysis#Tokenization), and [stop-word](https://en.wikipedia.org/wiki/Stop_word) lists, for multiple languages.
Click [here](https://github.com/AmenRa/retriv/blob/main/docs/sparse_retriever.md) to learn more.
- [Dense Retriever](https://github.com/AmenRa/retriv/blob/main/docs/dense_retriever.md): a dense retriever is a retrieval model that performs [semantic search](https://en.wikipedia.org/wiki/Semantic_search).
Click [here](https://github.com/AmenRa/retriv/blob/main/docs/dense_retriever.md) to learn more.
- [Hybrid Retriever](https://github.com/AmenRa/retriv/blob/main/docs/hybrid_retriever.md): a hybrid retriever is a retrieval model built on top of a sparse and a dense retriever.
Click [here](https://github.com/AmenRa/retriv/blob/main/docs/hybrid_retriever.md) to learn more.
- [Advanced Retriever](https://github.com/AmenRa/retriv/blob/main/docs/advanced_retriever.md): an advanced sparse retriever supporting filters. This is an experimental feature.
Click [here](https://github.com/AmenRa/retriv/blob/main/docs/advanced_retriever.md) to learn more.
### Unified Search Interface
All the supported retrievers share the same search interface:
- [search](#search): standard search functionality, what you expect by a search engine.
- [msearch](#multi-search): computes the results for multiple queries at once.
It leverages [automatic parallelization](https://en.wikipedia.org/wiki/Automatic_parallelization) whenever possible.
- [bsearch](#batch-search): similar to [msearch](#multi-search) but automatically generates batches of queries to evaluate and allows dynamic writing of the search results to disk in [JSONl](https://jsonlines.org) format. [bsearch](#batch-search) is handy for computing results for hundreds of thousands or even millions of queries without hogging your RAM. Pre-computed results can be leveraged for negative sampling during the training of [Neural Models](https://en.wikipedia.org/wiki/Artificial_neural_network) for [Information Retrieval](https://en.wikipedia.org/wiki/Information_retrieval).
### AutoTune
[retriv](https://github.com/AmenRa/retriv) automatically tunes [Faiss](https://github.com/facebookresearch/faiss) configuration for approximate nearest neighbors search by leveraging [AutoFaiss](https://github.com/criteo/autofaiss) to guarantee 10ms response time based on your available hardware.
Moreover, it offers an automatic tuning functionality for [BM25](https://en.wikipedia.org/wiki/Okapi_BM25)'s parameters, which require minimal user intervention.
Under the hood, [retriv](https://github.com/AmenRa/retriv) leverages [Optuna](https://optuna.org), a [hyperparameter optimization](https://en.wikipedia.org/wiki/Hyperparameter_optimization) framework, and [ranx](https://github.com/AmenRa/ranx), an [Information Retrieval](https://en.wikipedia.org/wiki/Information_retrieval) evaluation library, to test several parameter configurations for [BM25](https://en.wikipedia.org/wiki/Okapi_BM25) and choose the best one.
Finally, it can automatically balance the importance of lexical and semantic relevance scores computed by the [Hybrid Retriever](https://github.com/AmenRa/retriv/blob/main/docs/hybrid_retriever.md) to maximize retrieval effectiveness.
## 📚 Documentation
- [Sparse Retriever](https://github.com/AmenRa/retriv/blob/main/docs/sparse_retriever.md)
- [Dense Retriever](https://github.com/AmenRa/retriv/blob/main/docs/dense_retriever.md)
- [Hybrid Retriever](https://github.com/AmenRa/retriv/blob/main/docs/hybrid_retriever.md)
- [Text Pre-Processing](https://github.com/AmenRa/retriv/blob/main/docs/text_preprocessing.md)
- [FAQ](https://github.com/AmenRa/retriv/blob/main/docs/faq.md)
## 🔌 Requirements
```
python>=3.8
```
## 💾 Installation
```bash
pip install retriv
```
## 💡 Minimal Working Example
```python
# Note: SearchEngine is an alias for the SparseRetriever
from retriv import SearchEngine
collection = [
{"id": "doc_1", "text": "Generals gathered in their masses"},
{"id": "doc_2", "text": "Just like witches at black masses"},
{"id": "doc_3", "text": "Evil minds that plot destruction"},
{"id": "doc_4", "text": "Sorcerer of death's construction"},
]
se = SearchEngine("new-index").index(collection)
se.search("witches masses")
```
Output:
```json
[
{
"id": "doc_2",
"text": "Just like witches at black masses",
"score": 1.7536403
},
{
"id": "doc_1",
"text": "Generals gathered in their masses",
"score": 0.6931472
}
]
```
## 🎁 Feature Requests
Would you like to see other features implemented? Please, open a [feature request](https://github.com/AmenRa/retriv/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=%5BFeature+Request%5D+title).
## 🤘 Want to contribute?
Would you like to contribute? Please, drop me an [e-mail](mailto:elias.bssn@gmail.com?subject=[GitHub]%20retriv).
## 📄 License
[retriv](https://github.com/AmenRa/retriv) is an open-sourced software licensed under the [MIT license](LICENSE).
| /retriv-0.2.3.tar.gz/retriv-0.2.3/README.md | 0.590307 | 0.831109 | README.md | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.