code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import collections.abc
import hashlib
import pathlib
from typing import Any, Optional
from ...query import QuerySpecification
from ...serde import pydantic_jsonable_dict
from .delegate import FileDelegate, S3Credentials
from .progress import (
NoopProgressMonitorFactory,
ProgressMonitorFactory,
)
from .record import FileRecord
class File:
    """
    Domain entity for a file tracked by the platform.

    Wraps a ``FileRecord`` (pure data) and a ``FileDelegate`` (service/storage
    I/O); all remote interaction is forwarded to the delegate.
    """

    __delegate: FileDelegate
    __record: FileRecord

    @staticmethod
    def compute_file_id(uri: str) -> str:
        """
        Return a fixed size (32 character), unique ID deterministically from an Object's bucket and key.
        Versioned objects will have the same ID.
        """
        # blake2b with a 16-byte digest yields a 32-character hex string.
        return hashlib.blake2b(uri.encode("utf-8"), digest_size=16).hexdigest()

    @staticmethod
    def construct_s3_obj_uri(
        bucket: str, key: str, version: Optional[str] = None
    ) -> str:
        """Build an ``s3://bucket/key`` URI, optionally pinned to an object version."""
        base_uri = f"s3://{bucket}/{key}"
        if version:
            base_uri += f"?versionId={version}"
        return base_uri

    @classmethod
    def from_id(
        cls,
        file_id: str,
        delegate: FileDelegate,
        org_id: Optional[str] = None,
    ) -> "File":
        """Look up a single file by its primary key via the delegate."""
        record = delegate.get_record_by_primary_key(file_id, org_id)
        return cls(record, delegate)

    @classmethod
    def query(
        cls,
        query: QuerySpecification,
        delegate: FileDelegate,
        org_id: Optional[str] = None,
    ) -> collections.abc.Generator["File", None, None]:
        """
        Yield every file matching ``query``, transparently following pagination.

        Raises:
            ValueError: if the query references a field that is not a known
                attribute of ``FileRecord``.
        """
        known = set(FileRecord.__fields__.keys())
        actual = set()
        for field in query.fields():
            # Support dot notation for nested fields
            # E.g., "metadata.SoftwareVersion"
            if "." in field:
                actual.add(field.split(".")[0])
            else:
                actual.add(field)
        unknown = actual - known
        if unknown:
            plural = len(unknown) > 1
            msg = (
                "are not known attributes of File"
                if plural
                else "is not a known attribute of File"
            )
            raise ValueError(f"{unknown} {msg}. Known attributes: {known}")
        paginated_results = delegate.query_files(query, org_id=org_id)
        while True:
            for record in paginated_results.items:
                yield cls(record, delegate)
            if paginated_results.next_token:
                # Continue from the server-provided pagination token.
                query.after = paginated_results.next_token
                paginated_results = delegate.query_files(query, org_id=org_id)
            else:
                break

    def __init__(self, record: FileRecord, delegate: FileDelegate):
        self.__record = record
        self.__delegate = delegate

    @property
    def file_id(self) -> str:
        return self.__record.file_id

    @property
    def uri(self) -> str:
        return self.__record.uri

    @property
    def record(self) -> FileRecord:
        return self.__record

    @property
    def relative_path(self) -> str:
        return self.__record.relative_path

    def delete(self) -> None:
        """Permanently delete this file via the delegate."""
        self.__delegate.delete_file(self.__record)

    def download(
        self,
        local_path: pathlib.Path,
        credentials: S3Credentials,
        # Shared default instance is safe: the no-op factory is stateless.
        progress_monitor_factory: ProgressMonitorFactory = NoopProgressMonitorFactory(),
    ):
        """Download this file to ``local_path``, reporting progress if a factory is given."""
        self.__delegate.download_file(
            self.__record,
            local_path,
            credentials,
            progress_monitor_factory=progress_monitor_factory,
        )

    def get_signed_url(self) -> str:
        """Return a pre-signed URL granting direct access to this file."""
        return self.__delegate.get_signed_url(self.__record)

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable dict representation of the underlying record."""
        return pydantic_jsonable_dict(self.__record)
import abc
from typing import Optional
import tqdm
class ProgressMonitor(abc.ABC):
    """Interface for reporting byte-level progress of a single transfer."""

    @abc.abstractmethod
    def update(self, uploaded_bytes: int):
        """Record that ``uploaded_bytes`` additional bytes have been transferred."""
        raise NotImplementedError("update")

    @abc.abstractmethod
    def close(self):
        """Finalize the monitor; no further updates are expected after this."""
        raise NotImplementedError("close")

    @abc.abstractmethod
    def is_closed(self) -> bool:
        """Return whether this monitor is finished (see implementations for exact semantics)."""
        raise NotImplementedError("is_closed")
class ProgressMonitorFactory(abc.ABC):
    """Creates ProgressMonitor instances for uploads and downloads."""

    @abc.abstractmethod
    def upload_monitor(self, source: str, size: int) -> ProgressMonitor:
        """Create a monitor for an upload of ``size`` bytes from ``source``."""
        raise NotImplementedError("upload_monitor")

    # NOTE(review): deliberately not abstract, so upload-only subclasses need not
    # override it; calling it on such a subclass raises NotImplementedError.
    def download_monitor(self, source: str, size: int) -> ProgressMonitor:
        """Create a monitor for a download of ``size`` bytes from ``source``."""
        raise NotImplementedError("download_monitor")
class NoopProgressMonitor(ProgressMonitor):
    """A do-nothing monitor: discards updates and performs no rendering."""

    def update(self, uploaded_bytes: int):
        pass

    def close(self):
        pass

    def is_closed(self) -> bool:
        # Always reports "open", even after close() — the no-op monitor keeps
        # no state.
        return False
class NoopProgressMonitorFactory(ProgressMonitorFactory):
    """Factory yielding no-op monitors; use when progress output is unwanted."""

    def upload_monitor(self, source: str, size: int) -> ProgressMonitor:
        return NoopProgressMonitor()

    def download_monitor(self, source: str, size: int) -> ProgressMonitor:
        return NoopProgressMonitor()
class TqdmProgressMonitor(ProgressMonitor):
    """ProgressMonitor backed by a tqdm terminal progress bar."""

    __tqdm: tqdm.tqdm
    __is_closed: bool

    def __init__(self, total: int, desc: str, position: int = 0, leave: bool = True):
        self.__is_closed = False
        self.__tqdm = tqdm.tqdm(
            desc=desc,
            total=total,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
            bar_format="{percentage:.1f}%|{bar:25} | {rate_fmt} | {desc}",
            position=position,
            leave=leave,
        )

    def update(self, uploaded_bytes: int):
        """Advance the bar by the given number of bytes."""
        self.__tqdm.update(uploaded_bytes)

    def close(self):
        """Close the underlying bar and mark this monitor finished."""
        self.__tqdm.close()
        self.__is_closed = True

    def is_closed(self) -> bool:
        return self.__is_closed
class TqdmProgressMonitorFactory(ProgressMonitorFactory):
    """
    Hands out up to ``concurrency`` simultaneously-active tqdm monitors,
    each pinned to its own terminal row (tqdm ``position``).
    """

    __monitors: list[Optional[ProgressMonitor]]

    def __init__(self, concurrency: int = 1):
        # One slot per allowed concurrent transfer; None means "never used".
        self.__monitors = [None] * concurrency

    def __first_available_slot(self) -> Optional[int]:
        """Return the index of the first free (unused or closed) slot, else None."""
        for idx, monitor in enumerate(self.__monitors):
            if monitor is None or monitor.is_closed():
                return idx
        return None

    def __any_monitor(self, source: str, size: int) -> ProgressMonitor:
        # This for sure is not fully threadsafe, but it 100% works for single threading and
        # _mostly_ works for multithreading.
        slot = self.__first_available_slot()
        if slot is None:
            raise ValueError("Number of concurrent monitors is exceeding concurrency!")
        monitor = TqdmProgressMonitor(
            total=size,
            desc=f"Source: {source}",
            position=slot,
            # Leave the finished bar on screen only when it is the sole bar;
            # stacked bars would otherwise clutter the terminal.
            leave=len(self.__monitors) == 1,
        )
        self.__monitors[slot] = monitor
        return monitor

    def upload_monitor(self, source: str, size: int) -> ProgressMonitor:
        return self.__any_monitor(source=source, size=size)

    def download_monitor(self, source: str, size: int) -> ProgressMonitor:
        return self.__any_monitor(source=source, size=size)
import abc
from typing import Any, Optional
from .record import (
OrgInviteRecord,
OrgRecord,
OrgRoleName,
OrgRoleRecord,
OrgType,
)
class OrgDelegate(abc.ABC):
    """
    Abstract interface for organization, role, email-domain, and invite
    operations. Concrete implementations perform the underlying service calls.
    """

    @abc.abstractmethod
    def create_org(
        self,
        creator_user_id: Optional[str],
        name: str,
        org_type: OrgType,
        bind_email_domain: bool = False,
    ) -> OrgRecord:
        raise NotImplementedError("create_org")

    @abc.abstractmethod
    def update_org(
        self,
        updates: dict[str, Any],
        org_id: Optional[str] = None,
        caller_user_id: Optional[str] = None,
    ) -> OrgRecord:
        raise NotImplementedError("update_org")

    @abc.abstractmethod
    def orgs_for_user(self, user_id: Optional[str]) -> list[OrgRecord]:
        raise NotImplementedError("orgs_for_user")

    @abc.abstractmethod
    def org_roles_for_user(self, user_id: Optional[str]) -> list[OrgRoleRecord]:
        raise NotImplementedError("org_roles_for_user")

    @abc.abstractmethod
    def org_roles_for_org(self, org_id: Optional[str]) -> list[OrgRoleRecord]:
        raise NotImplementedError("org_roles_for_org")

    @abc.abstractmethod
    def org_role_for_user_in_org(
        self, user_id: Optional[str] = None, org_id: Optional[str] = None
    ) -> OrgRoleRecord:
        raise NotImplementedError("org_role_for_user_in_org")

    @abc.abstractmethod
    def add_role_for_user(
        self, user_id: str, role_name: OrgRoleName, org_id: Optional[str] = None
    ):
        raise NotImplementedError("add_role_for_user")

    @abc.abstractmethod
    def remove_role_from_user(
        self, user_id: str, role_name: OrgRoleName, org_id: Optional[str] = None
    ):
        # Fixed: message previously said "remove_role_for_user", which does not
        # match the method name.
        raise NotImplementedError("remove_role_from_user")

    @abc.abstractmethod
    def remove_user_from_org(self, user_id: str, org_id: Optional[str] = None) -> None:
        raise NotImplementedError("remove_user_from_org")

    @abc.abstractmethod
    def get_org_by_id(self, org_id: str) -> OrgRecord:
        raise NotImplementedError("get_org_by_id")

    @abc.abstractmethod
    def delete_org(self, org_id: str):
        raise NotImplementedError("delete_org")

    @abc.abstractmethod
    def bind_email_domain(self, org_id: str, email_domain: str):
        raise NotImplementedError("bind_email_domain")

    @abc.abstractmethod
    def unbind_email_domain(self, email_domain: str, org_id: Optional[str] = None):
        raise NotImplementedError("unbind_email_domain")

    @abc.abstractmethod
    def get_email_domains_for_org(self, org_id: Optional[str] = None) -> list[str]:
        raise NotImplementedError("get_email_domains_for_org")

    @abc.abstractmethod
    def invite_user_to_org(
        self, invited_user_id: str, org_id: str, inviting_user_id: Optional[str] = None
    ) -> OrgInviteRecord:
        raise NotImplementedError("invite_user_to_org")

    @abc.abstractmethod
    def get_invites_for_org(
        self, org_id: Optional[str] = None
    ) -> list[OrgInviteRecord]:
        raise NotImplementedError("get_invites_for_org")

    @abc.abstractmethod
    def accept_org_invite(self, invite_id: str, user_id: Optional[str] = None):
        raise NotImplementedError("accept_org_invite")

    @abc.abstractmethod
    def decline_org_invite(self, invite_id: str, user_id: Optional[str] = None):
        # Fixed: message previously said "accept_org_invite" (copy/paste error).
        raise NotImplementedError("decline_org_invite")

    @abc.abstractmethod
    def get_org_invite(
        self, invite_id: str, user_id: Optional[str] = None
    ) -> OrgInviteRecord:
        raise NotImplementedError("get_org_invite")
import abc
import datetime
from typing import Any, Optional
import pydantic
from ...auth import Permissions
from ...http import PaginatedList
from ...query import QuerySpecification
from ...serde import pydantic_jsonable_dict
from ...time import utcnow
from ...updates import (
MetadataChangeset,
UpdateCondition,
)
from ..files import FileRecord
from .record import (
Administrator,
DatasetRecord,
StorageLocation,
)
class Credentials(pydantic.BaseModel):
    """Temporary credentials for direct access to a dataset's storage."""

    access_key_id: str
    bucket: str
    expiration: datetime.datetime
    secret_access_key: str
    session_token: str
    # NOTE(review): presumably the object-key prefix these credentials are
    # scoped to — confirm against the vending service.
    required_prefix: str
    # Set when the credentials were vended for a specific upload transaction.
    upload_id: Optional[str] = None

    def is_expired(self) -> bool:
        """Return True once the expiration timestamp has been reached."""
        return utcnow() >= self.expiration

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable dict, omitting unset optional fields."""
        return pydantic_jsonable_dict(self, exclude_none=True)
class DatasetDelegate(abc.ABC):
    """
    Abstract interface for dataset lifecycle, file listing, querying, and
    credential-vending operations. Concrete implementations perform the
    underlying service calls.
    """

    @abc.abstractmethod
    def complete_upload(
        self, dataset_id: str, upload_id: str, org_id: Optional[str] = None
    ) -> None:
        raise NotImplementedError("complete_upload")

    @abc.abstractmethod
    def create_dataset(
        self,
        administrator: Administrator = Administrator.Roboto,
        metadata: Optional[dict[str, Any]] = None,
        storage_location: StorageLocation = StorageLocation.S3,
        tags: Optional[list[str]] = None,
        org_id: Optional[str] = None,
        created_by: Optional[str] = None,
        description: Optional[str] = None,
    ) -> DatasetRecord:
        raise NotImplementedError("create_dataset")

    @abc.abstractmethod
    def delete_dataset(self, record: DatasetRecord) -> None:
        raise NotImplementedError("delete_dataset")

    @abc.abstractmethod
    def get_dataset_by_primary_key(
        self,
        dataset_id: str,
        org_id: Optional[str] = None,
    ) -> DatasetRecord:
        raise NotImplementedError("get_dataset_by_primary_key")

    @abc.abstractmethod
    def get_temporary_credentials(
        self,
        record: DatasetRecord,
        permissions: Permissions,
        caller: Optional[str] = None,
        transaction_id: Optional[str] = None,
    ) -> Credentials:
        raise NotImplementedError("get_temporary_credentials")

    @abc.abstractmethod
    def list_files(
        self,
        dataset_id: str,
        org_id: Optional[str] = None,
        page_token: Optional[str] = None,
    ) -> PaginatedList[FileRecord]:
        raise NotImplementedError("list_files")

    @abc.abstractmethod
    def query_datasets(
        self,
        query: QuerySpecification,
        org_id: Optional[str] = None,
    ) -> PaginatedList[DatasetRecord]:
        raise NotImplementedError("query_datasets")

    @abc.abstractmethod
    def update(
        self,
        record: DatasetRecord,
        metadata_changeset: Optional[MetadataChangeset] = None,
        conditions: Optional[list[UpdateCondition]] = None,
        description: Optional[str] = None,
        updated_by: Optional[str] = None,
    ) -> DatasetRecord:
        raise NotImplementedError("update")
import collections.abc
from typing import Any, Optional
from ...domain.actions import (
Action,
ActionDelegate,
ComputeRequirements,
ContainerParameters,
Invocation,
InvocationDataSource,
InvocationDelegate,
InvocationSource,
)
from ...query import (
ConditionType,
QuerySpecification,
)
from ...serde import pydantic_jsonable_dict
from .trigger_delegate import TriggerDelegate
from .trigger_record import TriggerRecord
class Trigger:
    """
    Domain entity for an executor trigger: a rule that automatically invokes an
    action on new datasets matching its conditions. Wraps a ``TriggerRecord``
    and forwards all service interaction to the supplied delegates.
    """

    __record: TriggerRecord
    __action_delegate: ActionDelegate
    __invocation_delegate: InvocationDelegate
    __trigger_delegate: TriggerDelegate

    @classmethod
    def create(
        cls,
        name: str,
        action_name: str,
        required_inputs: list[str],
        action_delegate: ActionDelegate,
        invocation_delegate: InvocationDelegate,
        trigger_delegate: TriggerDelegate,
        org_id: Optional[str] = None,
        created_by: Optional[str] = None,
        compute_requirement_overrides: Optional[ComputeRequirements] = None,
        container_parameter_overrides: Optional[ContainerParameters] = None,
        condition: Optional[ConditionType] = None,
    ) -> "Trigger":
        """
        Creates an executor trigger, which automatically invokes an action on every new dataset that meets
        some acceptance criteria.

        Args:
            name: A human-readable name for this trigger. Trigger names must be unique within each
                organization, though collisions are fine across different organizations.
            action_name: The name of an executor action to run.
                If an action with the specified name is not found, the trigger will not be created.
                If the action bound to this trigger is ever deleted, the trigger will be deleted along with it.
            required_inputs: A list of gitignore path patterns that describe a set of files required to
                invoke an action described by this trigger against a given dataset.
                An action will be invoked if at least one file is uploaded that matches each listed condition.
                Once invoked, this list of path patterns will also be used to determine which files from the dataset
                to make available to the action at runtime (i.e., downloaded into $INPUT_DIR).
                If you want to make the entire dataset available to the action, add a condition for "**/*" to the
                end of required_inputs.
            org_id: The ID of the organization the user is making this request on behalf of. If the user is
                only a member of one organization, this parameter will be set implicitly.
            compute_requirement_overrides: Optional overrides of the compute parameters specified by the
                action.
            container_parameter_overrides: Optional overrides of the container parameters specified by the
                action.
            action_delegate: An abstraction object for performing actions against the actions API.
            invocation_delegate: An abstraction object for performing actions against the invocations API.
            trigger_delegate: An abstraction object for performing actions against the triggers API.

        Returns:
            Trigger: A reference to a Trigger entity object which allows the user to perform additional operations
            on the newly created Trigger.
        """
        record = trigger_delegate.create_trigger(
            name=name,
            org_id=org_id,
            action_name=action_name,
            required_inputs=required_inputs,
            compute_requirement_overrides=compute_requirement_overrides,
            container_parameter_overrides=container_parameter_overrides,
            created_by=created_by,
            condition=condition,
        )
        return cls(
            record=record,
            action_delegate=action_delegate,
            invocation_delegate=invocation_delegate,
            trigger_delegate=trigger_delegate,
        )

    @classmethod
    def from_name(
        cls,
        name: str,
        action_delegate: ActionDelegate,
        invocation_delegate: InvocationDelegate,
        trigger_delegate: TriggerDelegate,
        org_id: Optional[str] = None,
    ) -> "Trigger":
        """Look up an existing trigger by its (org-scoped) unique name."""
        record = trigger_delegate.get_trigger_by_primary_key(name=name, org_id=org_id)
        return cls(
            record=record,
            action_delegate=action_delegate,
            invocation_delegate=invocation_delegate,
            trigger_delegate=trigger_delegate,
        )

    @classmethod
    def query(
        cls,
        query: QuerySpecification,
        action_delegate: ActionDelegate,
        invocation_delegate: InvocationDelegate,
        trigger_delegate: TriggerDelegate,
        org_id: Optional[str] = None,
    ) -> collections.abc.Generator["Trigger", None, None]:
        """Yield every trigger matching ``query``, transparently following pagination."""
        paginated_results = trigger_delegate.query_triggers(query, org_id=org_id)
        while True:
            for record in paginated_results.items:
                yield cls(
                    record=record,
                    action_delegate=action_delegate,
                    invocation_delegate=invocation_delegate,
                    trigger_delegate=trigger_delegate,
                )
            if paginated_results.next_token:
                # Continue from the server-provided pagination token.
                query.after = paginated_results.next_token
                paginated_results = trigger_delegate.query_triggers(
                    query, org_id=org_id
                )
            else:
                break

    def __init__(
        self,
        record: TriggerRecord,
        action_delegate: ActionDelegate,
        invocation_delegate: InvocationDelegate,
        trigger_delegate: TriggerDelegate,
    ):
        self.__record = record
        self.__action_delegate = action_delegate
        self.__invocation_delegate = invocation_delegate
        self.__trigger_delegate = trigger_delegate

    @property
    def name(self):
        return self.__record.name

    @property
    def record(self) -> TriggerRecord:
        return self.__record

    @property
    def condition(self) -> Optional[ConditionType]:
        return self.__record.condition

    @property
    def enabled(self) -> bool:
        return self.__record.enabled

    def delete(self):
        """Delete this trigger on the service."""
        self.__trigger_delegate.delete_trigger(
            name=self.__record.name, org_id=self.__record.org_id
        )

    def update(
        self, updates: dict[str, Any], updated_by: Optional[str] = None
    ) -> TriggerRecord:
        """Apply ``updates`` to this trigger and refresh the cached record."""
        self.__record = self.__trigger_delegate.update_trigger(
            name=self.__record.name,
            org_id=self.__record.org_id,
            updates=updates,
            updated_by=updated_by,
        )
        return self.__record

    def action(self) -> Action:
        """Resolve the Action entity this trigger is bound to."""
        return Action.from_name(
            name=self.__record.action_name,
            action_delegate=self.__action_delegate,
            invocation_delegate=self.__invocation_delegate,
            org_id=self.__record.org_id,
        )

    def invoke(self, data_source: InvocationDataSource) -> Invocation:
        """Invoke this trigger's action against ``data_source``, applying the trigger's overrides."""
        params: dict[str, Any] = {
            "input_data": self.__record.required_inputs,
            "data_source_id": data_source.data_source_id,
            "data_source_type": data_source.data_source_type,
            "invocation_source": InvocationSource.Trigger,
            "invocation_source_id": self.__record.name,
            "compute_requirement_overrides": self.__record.compute_requirement_overrides,
            "container_parameter_overrides": self.__record.container_parameter_overrides,
        }
        return self.action().invoke(**params)

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable dict representation of the underlying record."""
        return pydantic_jsonable_dict(self.__record)
import datetime
import enum
import json
from typing import Optional
import pydantic
from ...serde import pydantic_jsonable_dict
from .action_container_resources import (
ComputeRequirements,
ContainerParameters,
)
class InvocationDataSourceType(enum.Enum):
    """Source of data for an Action's InputBinding"""

    Dataset = "Dataset"
class InvocationDataSource(pydantic.BaseModel):
    """Identifies where an invocation's input data comes from."""

    data_source_type: InvocationDataSourceType
    # The "type" determines the meaning of "id":
    #   - if type is "Dataset," id is a dataset_id
    data_source_id: str
class InvocationSource(enum.Enum):
    """How an invocation was initiated: automatically or by a user."""

    Trigger = "Trigger"
    Manual = "Manual"
class InvocationProvenance(pydantic.BaseModel):
    """Records what initiated an invocation."""

    source_type: InvocationSource
    # The "type" determines the meaning of "id":
    #   - if type is "Trigger," id is a TriggerId;
    #   - if type is "Manual," id is a UserId.
    source_id: str
class InvocationStatus(enum.Enum):
    """Linear lifecycle of an invocation, plus out-of-band failure states."""

    Queued = 0
    Scheduled = 1
    Downloading = 2
    Processing = 3
    Uploading = 4
    Completed = 5
    # Failure status' exist outside linear progression of invocation status
    Failed = 998
    Deadly = 999

    def __str__(self) -> str:
        return self.name

    def can_transition_to(self, other: "InvocationStatus") -> bool:
        """Return True if moving from this status to ``other`` is a legal transition."""
        if other is self:
            return True
        # Completed and Deadly are dead ends.
        if self is InvocationStatus.Completed or self is InvocationStatus.Deadly:
            return False
        if self is InvocationStatus.Failed:
            # A failed invocation may be retried (re-queued) or written off.
            return other in (InvocationStatus.Queued, InvocationStatus.Deadly)
        if other is InvocationStatus.Failed:
            return True
        if other is InvocationStatus.Deadly:
            # Only a queued invocation can be declared dead outright;
            # Failed -> Deadly was already handled above.
            return self is InvocationStatus.Queued
        # Otherwise only a single forward step along the happy path is legal.
        return other.value - self.value == 1

    def is_running(self) -> bool:
        """True while the invocation is actively doing work."""
        return self in (
            InvocationStatus.Downloading,
            InvocationStatus.Processing,
            InvocationStatus.Uploading,
        )

    def is_terminal(self) -> bool:
        """True once the invocation will make no further ordinary progress."""
        return self in (
            InvocationStatus.Completed,
            InvocationStatus.Failed,
            InvocationStatus.Deadly,
        )

    def next(self) -> Optional["InvocationStatus"]:
        """Return the next happy-path status, or None from any terminal status."""
        return None if self.is_terminal() else InvocationStatus(self.value + 1)
class InvocationStatusRecord(pydantic.BaseModel):
    """A single timestamped entry in an invocation's status history."""

    status: InvocationStatus
    detail: Optional[str] = None
    timestamp: datetime.datetime  # Persisted as ISO 8601 string in UTC

    def to_presentable_dict(self) -> dict[str, Optional[str]]:
        """Render as a flat dict of display-ready strings (detail may be None)."""
        return {
            "status": str(self.status),
            "timestamp": self.timestamp.isoformat(),
            "detail": self.detail,
        }
class InvocationRecord(pydantic.BaseModel):
    """Full persisted state of a single action invocation."""

    # When adding or removing fields, make sure to update __str__
    action_name: str
    created: datetime.datetime  # Persisted as ISO 8601 string in UTC
    data_source: InvocationDataSource
    input_data: list[str]
    invocation_id: str  # Sort key
    logs_bucket: Optional[str] = None
    logs_prefix: Optional[str] = None
    compute_requirements: ComputeRequirements
    container_parameters: ContainerParameters
    org_id: str  # Partition key
    provenance: InvocationProvenance
    status: list[InvocationStatusRecord] = pydantic.Field(default_factory=list)

    def __str__(self) -> str:
        """Pretty-printed JSON rendering of every field, for logs/debugging."""
        return json.dumps(
            {
                "action_name": self.action_name,
                "created": self.created.isoformat(),
                "data_source": pydantic_jsonable_dict(self.data_source),
                "input_data": self.input_data,
                "invocation_id": self.invocation_id,
                "logs_bucket": self.logs_bucket,
                "logs_prefix": self.logs_prefix,
                "compute_requirements": pydantic_jsonable_dict(
                    self.compute_requirements
                ),
                "container_parameters": pydantic_jsonable_dict(
                    self.container_parameters
                ),
                "org_id": self.org_id,
                "provenance": pydantic_jsonable_dict(self.provenance),
                "status": [
                    status_record.to_presentable_dict() for status_record in self.status
                ],
            },
            indent=2,
        )
class LogRecord(pydantic.BaseModel):
    """A single log line emitted by an invocation, with its timestamp."""

    log: str
    timestamp: datetime.datetime
import base64
import enum
import json
import typing
from pydantic.generics import GenericModel
from ..logging import default_logger
# Module-level logger shared by the pagination helpers below.
logger = default_logger()
# Type variable parameterizing the generic list wrappers below.
Model = typing.TypeVar("Model")
class PaginatedList(GenericModel, typing.Generic[Model]):
    """
    A list of records pulled from a paginated result set.
    It may be a subset of that result set,
    in which case `next_token` will be set and can be used to fetch the next page.
    """

    # The records on this page.
    items: list[Model]
    # Opaque token that can be used to fetch the next page of results.
    next_token: typing.Optional[str] = None
class StreamedList(GenericModel, typing.Generic[Model]):
    """
    A StreamedList differs from a PaginatedList in that it represents a stream of data that is
    in process of being written to. Unlike a result set, which is finite and complete,
    a stream may be infinite, and it is unknown when or if it will complete.
    """

    # The records read from the stream for this page.
    items: list[Model]
    # Opaque token that can be used to fetch the next page of results.
    last_read: typing.Optional[str]
    # If True, it is known that there are more items to be fetched;
    # use `last_read` as a pagination token to fetch those additional records.
    # If False, it is not known if there are more items to be fetched.
    has_next: bool
class PaginationTokenEncoding(enum.Enum):
    """How a token's data payload is serialized within the token string."""

    Json = "json"
    Raw = "raw"
class PaginationTokenScheme(enum.Enum):
    """Versioned wire format of the token; allows evolving the layout over time."""

    V1 = "v1"
class PaginationToken:
    """
    A pagination token that can be treated as a truly opaque token by clients,
    with support for evolving the token format over time.

    Wire format (before base64url encoding): "<scheme>:<encoding>:<data>".
    """

    __scheme: PaginationTokenScheme
    __encoding: PaginationTokenEncoding
    __data: typing.Any

    @staticmethod
    def empty() -> "PaginationToken":
        """Return a token carrying no data (its len() is 0)."""
        return PaginationToken(
            PaginationTokenScheme.V1, PaginationTokenEncoding.Raw, None
        )

    @staticmethod
    def encode(data: str) -> str:
        """Base64 encode the data and strip all trailing padding ("=")."""
        return (
            base64.urlsafe_b64encode(data.encode("utf-8")).decode("utf-8").rstrip("=")
        )

    @staticmethod
    def decode(data: str) -> str:
        """Base64 decode the data, adding back any trailing padding ("=") as necessary to make data properly Base64."""
        # Pad to the next multiple of 4 in one step instead of looping.
        data += "=" * (-len(data) % 4)
        return base64.urlsafe_b64decode(data).decode("utf-8")

    @classmethod
    def from_token(cls, token: typing.Optional[str]) -> "PaginationToken":
        """
        Parse an opaque token produced by ``to_token``; None yields an empty token.

        Raises:
            ValueError: if the token is malformed or uses an unknown scheme.
        """
        if token is None:
            return PaginationToken.empty()
        try:
            decoded = PaginationToken.decode(token)
            if not decoded.startswith(PaginationTokenScheme.V1.value):
                logger.error("Invalid pagination token scheme %s", decoded)
                raise ValueError("Invalid pagination token scheme")
            scheme, encoding, data = decoded.split(":", maxsplit=2)
            pagination_token_scheme = PaginationTokenScheme(scheme)
            pagination_token_encoding = PaginationTokenEncoding(encoding)
            return cls(
                pagination_token_scheme,
                pagination_token_encoding,
                json.loads(data)
                if pagination_token_encoding == PaginationTokenEncoding.Json
                else data,
            )
        except Exception as e:
            logger.error("Invalid pagination token", exc_info=e)
            # `from None` hides internal parse details from callers.
            raise ValueError("Invalid pagination token format") from None

    def __init__(
        self,
        scheme: PaginationTokenScheme,
        encoding: PaginationTokenEncoding,
        data: typing.Any,
    ):
        self.__scheme = scheme
        self.__encoding = encoding
        self.__data = data

    def __len__(self):
        # A data-less token has length 0, so it is falsy in boolean context.
        return len(str(self)) if self.__data else 0

    def __str__(self):
        return self.to_token()

    @property
    def data(self) -> typing.Any:
        return self.__data

    def to_token(self) -> str:
        """Serialize as base64url(scheme:encoding:data) with padding stripped."""
        data = (
            json.dumps(self.__data)
            if self.__encoding == PaginationTokenEncoding.Json
            else self.__data
        )
        return PaginationToken.encode(
            f"{self.__scheme.value}:{self.__encoding.value}:{data}"
        )
import argparse
import enum
import shlex
import typing
import pydantic
from ...domain.actions import (
ComputeRequirements,
ContainerParameters,
)
from ..command import KeyValuePairsAction
from .exceptions import ParseError
class DockerInstructionForm(enum.Enum):
    """The form of a CMD instruction."""

    Exec = "exec"
    Shell = "shell"
# Sentinel distinguishing "explicitly clear this value" (CLI literal 'null')
# from "not provided" (None) in the override-parsing helpers below.
null = object()
def parse_compute_requirements(
    args: argparse.Namespace,
    default_vcpu: typing.Optional[int] = None,
    default_memory: typing.Optional[int] = None,
    default_storage: typing.Optional[int] = None,
) -> typing.Optional[ComputeRequirements]:
    """
    Build ComputeRequirements from CLI args, falling back to the given defaults.

    Returns None when the user specified none of --vcpu/--memory/--storage.

    Raises:
        ParseError: when the resulting values fail model validation.
    """
    if not (args.vcpu or args.memory or args.storage):
        return None
    candidates = {
        "vCPU": args.vcpu if args.vcpu else default_vcpu,
        "memory": args.memory if args.memory else default_memory,
        "storage": args.storage if args.storage else default_storage,
    }
    kwargs = {key: value for key, value in candidates.items() if value is not None}
    if not kwargs:
        return None
    try:
        return ComputeRequirements.parse_obj(kwargs)
    except pydantic.ValidationError as exc:
        # Surface the first validation failure as a CLI parse error.
        for err in exc.errors():
            err_msg = err.get("msg")
            raise ParseError(err_msg if err_msg else err) from None
    return None
def add_compute_requirements_args(parser: argparse.ArgumentParser) -> None:
    """Register the --vcpu/--memory/--storage/--gpu options on ``parser``."""
    group = parser.add_argument_group(
        "Resource requirements",
        "Specify required compute resources.",
    )
    group.add_argument(
        "--vcpu",
        type=int,
        required=False,
        choices=[256, 512, 1024, 2048, 4096, 8192, 16384],
        help="CPU units to dedicate to action invocation. Defaults to 512 (0.5vCPU).",
    )
    group.add_argument(
        "--memory",
        type=int,
        required=False,
        help=(
            "Memory (in MiB) to dedicate to action invocation. Defaults to 1024 (1 GiB). "
            "Supported values range from 512 (0.5 GiB) to 122880 (120 GiB). "
            "Supported values are tied to selected vCPU resources. See documentation for more information."
        ),
    )
    group.add_argument(
        "--storage",
        type=int,
        required=False,
        help=(
            "Ephemeral storage (in GiB) to dedicate to action invocation. Defaults to 21 GiB. "
            "Supported values range from 21 to 200, inclusive."
        ),
    )
    # Placeholder
    group.add_argument(
        "--gpu",
        action="store_true",
        required=False,
        default=False,
        help=(
            "This is a placeholder; it currently does nothing. "
            "In the future, setting this option will invoke the action in a GPU-enabled compute environment."
        ),
    )
def parse_container_overrides(
    args: argparse.Namespace,
    default_entry_point: typing.Optional[list[str]] = None,
    default_command: typing.Optional[list[str]] = None,
    default_env_vars: typing.Optional[dict[str, str]] = None,
    default_workdir: typing.Optional[str] = None,
) -> typing.Optional[ContainerParameters]:
    """
    Build ContainerParameters overrides from CLI args.

    Returns None when the user specified no container-related option. The
    module-level ``null`` sentinel (produced when the user passes the literal
    'null') means "explicitly clear the stored override" and is mapped to a
    real None in the resulting model.

    Raises:
        ParseError: when the resulting values fail model validation.
    """
    if not args.entry_point and not args.command and not args.workdir and not args.env:
        return None
    try:
        entry_point: typing.Union[list[str], object] = default_entry_point
        if args.entry_point is null:
            entry_point = null
        elif args.entry_point is not None:
            entry_point = [args.entry_point]
        command: typing.Union[list[str], object] = default_command
        if args.command is null:
            command = null
        elif args.command is not None:
            command_form = DockerInstructionForm(args.command_form)
            if command_form == DockerInstructionForm.Exec and len(args.command):
                # Exec form: split the string shell-style into argv items.
                lexer = shlex.shlex(args.command, posix=True, punctuation_chars=True)
                lexer.whitespace_split = True
                command = list(lexer)
            else:
                # Shell form (or empty string): pass through as a single item.
                command = [args.command]
        kwargs = {
            key: value
            for key, value in [
                ("entry_point", entry_point),
                ("command", command),
                ("workdir", args.workdir if args.workdir else default_workdir),
                ("env_vars", args.env if args.env else default_env_vars),
            ]
            if value is not None
        }
        if not kwargs:
            return None
        # Map the `null` sentinel to a real None so the model clears the field.
        return ContainerParameters.parse_obj(
            {key: value if value is not null else None for key, value in kwargs.items()}
        )
    except pydantic.ValidationError as exc:
        for err in exc.errors():
            err_msg = err.get("msg")
            msg = err_msg if err_msg else err
            raise ParseError(msg) from None
    return None
def add_container_parameters_args(parser: argparse.ArgumentParser) -> None:
    """Register the container override options (--entrypoint, --command, --command-form, --workdir, --env) on ``parser``."""
    group = parser.add_argument_group(
        "Container parameters",
        "Specify parameters to pass to the action's Docker container at runtime.",
    )
    group.add_argument(
        "--entrypoint",
        required=False,
        # The literal string 'null' maps to the module-level `null` sentinel,
        # which parse_container_overrides treats as "clear the stored override".
        type=lambda s: s if s != "null" else null,
        dest="entry_point",
        help=(
            "Container ENTRYPOINT override."
            ' Supports passing empty string ("") as an override, which unsets the ENTRYPOINT specified in the docker image.'  # noqa: E501
            " If updating or invoking action which has existing ENTRYPOINT override, pass 'null' to remove the override."  # noqa: E501
            " Refer to docker documentation for more: "
            "https://docs.docker.com/engine/reference/builder/#entrypoint"
            " and https://docs.docker.com/engine/reference/run/#entrypoint-default-command-to-execute-at-runtime"
        ),
    )
    group.add_argument(
        "--command",
        required=False,
        type=lambda s: s if s != "null" else null,
        dest="command",
        help=(
            "Container CMD override."
            " If updating or invoking action which has existing CMD override, pass 'null' to remove the override."
            " Refer to docker documentation for more: "
            "https://docs.docker.com/engine/reference/builder/#cmd and"
            " https://docs.docker.com/engine/reference/run/#cmd-default-command-or-options"
        ),
    )
    group.add_argument(
        "--command-form",
        required=False,
        choices=[form.value for form in DockerInstructionForm],
        default=DockerInstructionForm.Exec.value,
        dest="command_form",
        help=(
            "In 'exec' form, the provided '--command' str is split into a list of strings"
            ' (e.g., \'--command "-c \'print(123)\'"\' is parsed as ["-c", "print(123)"]).'
            " In 'shell' form, the provided '--command' str is not split"
            " (e.g., '--command \"python -c 'print(123)'\"' is parsed as [\"python -c 'print(123)'\"])."
        ),
    )
    group.add_argument(
        "--workdir",
        required=False,
        type=lambda s: s if s != "null" else null,
        dest="workdir",
        help=(
            "If updating, pass 'null' to clear existing workdir."
            " Refer to docker documentation for more: https://docs.docker.com/engine/reference/run/#workdir"
        ),
    )
    group.add_argument(
        "--env",
        required=False,
        metavar="KEY=VALUE",
        nargs="*",
        action=KeyValuePairsAction,
        help=(
            "Zero or more 'key=value' formatted pairs to set as container ENV vars. "
            "Do not use ENV vars for secrets (such as API keys). "
            "See documentation: https://docs.docker.com/engine/reference/run/#env-environment-variables"
        ),
    )
import argparse
import json
import typing
from ...domain.datasets import Dataset
from ...query import (
Comparator,
Condition,
ConditionGroup,
ConditionOperator,
QuerySpecification,
)
from ..command import (
KeyValuePairsAction,
RobotoCommand,
)
from ..common_args import add_org_arg
from ..context import CLIContext
def search(args, context: CLIContext, parser: argparse.ArgumentParser):
    """Query datasets matching the metadata/tag filters in ``args``.

    Every filter is AND-ed together; matching dataset records are printed to
    stdout as an indented JSON array.
    """
    conditions: list[typing.Union[Condition, ConditionGroup]] = []

    # Each --metadata key=value pair becomes an equality condition on the
    # nested metadata field.
    for key, value in (args.metadata or {}).items():
        conditions.append(
            Condition(
                field=f"metadata.{key}",
                comparator=Comparator.Equals,
                value=value,
            )
        )

    # Each --tag becomes a containment condition on the tags list.
    for tag in args.tag or []:
        conditions.append(
            Condition(
                field="tags",
                comparator=Comparator.Contains,
                value=tag,
            )
        )

    spec = QuerySpecification(
        condition=ConditionGroup(
            conditions=conditions,
            operator=ConditionOperator.And,
        )
    )
    matches = Dataset.query(spec, context.datasets, context.files, org_id=args.org)
    print(json.dumps([match.to_dict() for match in matches], indent=4))
def search_setup_parser(parser):
    """Register the --metadata/--tag filter options and the --org option on ``parser``."""
    parser.add_argument(
        "--metadata",
        required=False,
        metavar="KEY=VALUE",
        nargs="*",
        action=KeyValuePairsAction,
        help=(
            "Zero or more 'key=value' pairs which represent dataset metadata. "
            "`value` is parsed as JSON. E.g.: --metadata foo=bar --metadata baz.nested=200"
        ),
    )
    parser.add_argument(
        "--tag",
        required=False,
        type=str,
        nargs="*",
        help="One or more tags associated with this dataset. E.g.: --tag foo --tag bar",
        # "extend" accumulates values across repeated --tag occurrences.
        action="extend",
    )
    add_org_arg(parser=parser)
# CLI registration for `datasets search`; wires the parser setup above to the
# search logic.
_search_command_kwargs = {"help": "Query dataset matching filter criteria."}

search_command = RobotoCommand(
    name="search",
    logic=search,
    setup_parser=search_setup_parser,
    command_kwargs=_search_command_kwargs,
)
import argparse
import json
from ...domain.datasets import Dataset
from ...updates import MetadataChangeset
from ..command import (
KeyValuePairsAction,
RobotoCommand,
)
from ..common_args import add_org_arg
from ..context import CLIContext
from .shared_helpdoc import DATASET_ID_HELP
def update(
    args: argparse.Namespace, context: CLIContext, parser: argparse.ArgumentParser
) -> None:
    """Apply tag/metadata/description changes to an existing dataset.

    Exits via ``parser.error`` when no change was requested; otherwise prints
    the updated dataset record as JSON.
    """
    changeset = MetadataChangeset(
        put_tags=args.put_tags,
        remove_tags=args.remove_tags,
        put_fields=args.put_metadata,
        remove_fields=args.remove_metadata,
    )

    # Nothing to do if the user supplied neither metadata changes nor a description.
    if not args.description and changeset.is_empty():
        parser.error("No dataset changes specified.")

    dataset = Dataset.from_id(
        args.dataset_id, context.datasets, context.files, org_id=args.org
    )
    dataset.update(metadata_changeset=changeset, description=args.description)

    print(f"Successfully updated dataset '{dataset.dataset_id}'. Record: ")
    print(json.dumps(dataset.to_dict(), indent=4))
def update_parser(parser: argparse.ArgumentParser):
    """Register the arguments for `datasets update`: target dataset, description,
    and the tag/metadata add & remove options."""
    parser.add_argument(
        "-d", "--dataset-id", type=str, required=True, help=DATASET_ID_HELP
    )
    add_org_arg(parser)
    parser.add_argument(
        "--description", help="A new description to add to this dataset"
    )
    parser.add_argument(
        "--put-tags",
        help="Add each tag in this sequence if it doesn't exist",
        nargs="*",  # 0 or more
    )
    parser.add_argument(
        "--remove-tags",
        help="Remove each tag in this sequence if it exists",
        nargs="*",  # 0 or more
    )
    parser.add_argument(
        "--put-metadata",
        required=False,
        metavar="KEY_PATH=VALUE",
        nargs="*",
        action=KeyValuePairsAction,
        help=(
            "Zero or more 'key_path=value' formatted pairs. "
            "An attempt is made to parse `value` as JSON; if this fails, `value` is stored as a string. "
            "If `key_path` already exists, existing value will be overwritten. "
            "Dot notation is supported for nested keys. "
            "Examples: "
            "--put-metadata 'key1=value1' 'key2.subkey1=value2' 'key3.sublist1=[\"a\",\"b\",\"c\"]'"  # noqa: E501
        ),
    )
    parser.add_argument(
        "--remove-metadata",
        required=False,
        metavar="KEY_PATH",
        nargs="*",
        help=(
            "Remove each key from dataset metadata if it exists. "
            "Dot notation is supported for nested keys. E.g.: --remove-metadata key1 key2.subkey3"
        ),
    )
# CLI registration for `datasets update`; wires the parser setup above to the
# update logic.
_update_command_kwargs = {"help": "Update an existing dataset."}

update_command = RobotoCommand(
    name="update",
    logic=update,
    setup_parser=update_parser,
    command_kwargs=_update_command_kwargs,
)
import json
import re
from typing import Any, Optional
from ..http import HttpError
from ..serde import safe_dict_drill
# NOTE(review): this pattern appears unused in the visible code —
# from_client_error below performs a plain substring check instead. (A
# module-level dunder name is not mangled, but referencing it from inside a
# class body would be, which may explain the substring check.) Confirm before
# removing.
__ORG_MESSAGE_PATTERN = re.compile(r"did not provide a org for single-org operation.")
class RobotoDomainException(Exception):
    """
    Expected exceptions from the Roboto domain entity objects.

    Carries a human-readable message plus an optional stack trace, and can be
    serialized to / deserialized from the wire format
    ``{"error": {"error_code": ..., "message": ..., "stack_trace": [...]}}``.
    """

    _message: str
    _stack_trace: list[str]

    def __init__(
        self,
        message: str,
        stack_trace: Optional[list[str]] = None,
        *args,
        **kwargs,
    ):
        super().__init__(message, *args, **kwargs)
        self._message = message
        # Previously `stack_trace=[]` was a shared mutable default, so mutating
        # one instance's trace leaked into every other instance. Build a fresh
        # list per instance instead.
        self._stack_trace = stack_trace if stack_trace is not None else []

    @staticmethod
    def from_json(contents: dict[str, Any]) -> "RobotoDomainException":
        """Rehydrate a domain exception from its serialized wire form.

        Raises:
            ValueError: if the payload lacks 'error_code'/'message', or the
                error code matches no direct subclass of RobotoDomainException.
        """
        error_code = safe_dict_drill(contents, ["error", "error_code"])
        inner_message = safe_dict_drill(contents, ["error", "message"])
        if error_code is None or inner_message is None:
            raise ValueError("Need 'error_code' and 'message' available.")
        # NOTE: only *direct* subclasses are considered; a deeper hierarchy
        # would not be found here.
        for subclass in RobotoDomainException.__subclasses__():
            if subclass.__name__ == error_code:
                return subclass(message=inner_message)
        # Bug fix: the code was previously not interpolated into the message.
        raise ValueError(f"Unrecognized error code '{error_code}'")

    @staticmethod
    def from_client_error(error: HttpError) -> "RobotoDomainException":
        """Map an HttpError to the most specific domain exception.

        Re-raises ``error`` unchanged when its status code is unrecognized.
        """
        message: Optional[str]
        if isinstance(error.msg, dict):
            # See if it's a first class RobotoException
            try:
                return RobotoDomainException.from_json(error.msg)
            except ValueError:
                pass
            # Handle JSON from non-roboto calls
            message = error.msg.get("message", json.dumps(error.msg))
        elif isinstance(error.msg, str):
            message = error.msg
        else:
            message = None

        if error.status is None:
            raise RobotoDomainException(error.msg)
        if error.status == 400:
            if (
                message is not None
                and "did not provide a org for single-org operation" in message
            ):
                return RobotoNoOrgProvidedException(error.msg)
            return RobotoInvalidRequestException(error.msg)
        if error.status in (401, 403):
            return RobotoUnauthorizedException(error.msg)
        if error.status == 404:
            return RobotoNotFoundException(error.msg)
        if 500 <= error.status < 600:
            return RobotoServiceException(error.msg)
        raise error

    @property
    def http_status_code(self) -> int:
        # Default for unclassified domain errors; subclasses override.
        return 500

    @property
    def error_code(self) -> str:
        # The wire-format discriminator is simply the concrete class name.
        return self.__class__.__name__

    @property
    def message(self) -> str:
        return self._message

    @property
    def stack_trace(self) -> list[str]:
        return self._stack_trace

    @stack_trace.setter
    def stack_trace(self, stack_trace: list[str]):
        self._stack_trace = stack_trace

    def to_dict(self) -> dict[str, Any]:
        """Serialize to the wire format; 'stack_trace' is omitted when empty."""
        error: dict[str, Any] = {"error_code": self.error_code, "message": self.message}
        if len(self._stack_trace) > 0:
            error["stack_trace"] = self._stack_trace
        return {"error": error}

    def serialize(self) -> str:
        """JSON-encode the ``to_dict`` representation."""
        return json.dumps(self.to_dict())
class RobotoUnauthorizedException(RobotoDomainException):
    """
    Thrown when a user is attempting to access a resource that they do not have permission to access
    """

    @property
    def http_status_code(self) -> int:
        # NOTE(review): from_client_error maps both 401 and 403 responses here,
        # but this always reports 401 — confirm that is intended.
        return 401
class RobotoNotFoundException(RobotoDomainException):
    """
    Thrown when a requested resource does not exist
    """

    @property
    def http_status_code(self) -> int:
        return 404
class RobotoInvalidRequestException(RobotoDomainException):
    """
    Thrown when request parameters are in some way invalid
    """

    @property
    def http_status_code(self) -> int:
        return 400
class RobotoNoOrgProvidedException(RobotoDomainException):
    """
    Thrown when no org is provided to an operation which requires an org.
    """

    @property
    def http_status_code(self) -> int:
        return 400
class RobotoConditionException(RobotoDomainException):
    """
    Thrown if there is a failed condition (e.g. a conditional write that did not hold)
    """

    @property
    def http_status_code(self) -> int:
        return 409
class RobotoConflictException(RobotoDomainException):
    """
    Thrown if there is a conflict between a resource you're creating and another existing resource
    """

    @property
    def http_status_code(self) -> int:
        return 409
class RobotoServiceException(RobotoDomainException):
    """
    Thrown when Roboto Service failed in an unexpected way
    """
    # Inherits the base-class 500 status code deliberately — no override needed.
class RobotoUnknownOperationException(RobotoDomainException):
    """
    Thrown if a user is attempting to perform an action unrecognized by the Roboto platform.
    """

    @property
    def http_status_code(self) -> int:
        return 404
class RobotoLimitExceededException(RobotoDomainException):
    """
    Thrown if an operation would exceed a user or org level limit.
    """

    @property
    def http_status_code(self) -> int:
        return 403
class RobotoHttpExceptionParse(object):
    """Context manager that converts any HttpError raised inside its body into
    the corresponding RobotoDomainException subclass.

    Non-HttpError exceptions propagate unchanged (``__exit__`` returns None).
    """

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception, traceback):
        # isinstance is the idiomatic equivalent of issubclass(type(x), ...)
        # and also handles `exception is None` (no error raised) cleanly.
        if isinstance(exception, HttpError):
            raise RobotoDomainException.from_client_error(error=exception)
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import BinaryIO, List, NewType, Optional, Union
from typing_extensions import Literal
from .url import URL
# Distinct NewType aliases for the various Bot API identifiers. They are plain
# str/int at runtime, but let type checkers catch, e.g., passing a UserID where
# a ChatID is expected.
Token = NewType('Token', str)
UserID = NewType('UserID', int)
ChatID = NewType('ChatID', int)
MessageID = NewType('MessageID', int)
InlineMessageID = NewType('InlineMessageID', str)
FileID = NewType('FileID', str)
PollID = NewType('PollID', str)
CallbackQueryID = NewType('CallbackQueryID', str)
@dataclass(frozen=True)
class FileDescription:
    """Describe a file to be sent through the API with customized metadata."""

    # Raw content: a filesystem path, an already-open binary stream, or bytes.
    binary_source: Union[Path, BinaryIO, bytes]
    # Filename reported to Telegram (what recipients see).
    basename: str
    # Generic binary type by default; override for correct client-side handling.
    mime_type: str = 'application/octet-stream'
# pylint: disable=invalid-triple-quote
InputFile = Union[Path, BinaryIO, FileID, FileDescription, URL]
"""An InputFile can be either:
A Path object: This will be used to open the file and read it to send.
A BufferedIOBase object: This supports sending an already open file.
A FileID from Telegram's Bot API itself: This will just send a previously sent file.
A FileDescription object: This is used to send binary data directly as a file, or to
customize mimetype or filename.
A URL: This instructs Telegram's server to download the file and send it. In
this case, file size is severely limited.
Refer to https://core.telegram.org/bots/api#sending-files for more information.
"""
# pylint: enable=invalid-triple-quote
@dataclass(frozen=True)
class _UserRequiredCommon:
id: UserID
is_bot: bool
first_name: str
@dataclass(frozen=True)
class _BotUserRequired(_UserRequiredCommon):
can_join_groups: bool
can_read_all_group_messages: bool
supports_inline_queries: bool
@dataclass(frozen=True)
class _UserOptionalCommon:
last_name: Optional[str] = None
username: Optional[str] = None
language_code: Optional[str] = None
@dataclass(frozen=True)
class User(_UserOptionalCommon, _UserRequiredCommon):
"""A user returned by the Bot API."""
@dataclass(frozen=True)
class BotUser(_UserOptionalCommon, _BotUserRequired):
"""A Bot user returned by the Bot API (only through getMe)."""
@dataclass(frozen=True)
class ChatPhoto:
"""Information for fetching the chat picture."""
small_file_id: FileID
small_file_unique_id: FileID
big_file_id: FileID
big_file_unique_id: FileID
@dataclass(frozen=True)
class ChatMember:
"""Information about one member of a chat."""
user: User
status: str
custom_title: Optional[str] = None
until_date: Optional[int] = None
can_be_edited: Optional[bool] = None
can_post_messages: Optional[bool] = None
can_edit_messages: Optional[bool] = None
can_delete_messages: Optional[bool] = None
can_restrict_members: Optional[bool] = None
can_promote_members: Optional[bool] = None
can_change_info: Optional[bool] = None
can_invite_users: Optional[bool] = None
can_pin_messages: Optional[bool] = None
is_member: Optional[bool] = None
can_send_messages: Optional[bool] = None
can_send_media_messages: Optional[bool] = None
can_send_polls: Optional[bool] = None
can_send_other_messages: Optional[bool] = None
can_add_web_page_previews: Optional[bool] = None
@dataclass(frozen=True)
class ChatPermissions:
"""Actions that a non-administrator user is allowed to take in a chat."""
can_send_messages: Optional[bool] = None
can_send_media_messages: Optional[bool] = None
can_send_polls: Optional[bool] = None
can_send_other_messages: Optional[bool] = None
can_add_web_page_previews: Optional[bool] = None
can_change_info: Optional[bool] = None
can_invite_users: Optional[bool] = None
can_pin_messages: Optional[bool] = None
@dataclass(frozen=True)
class Chat:
"""Representation of a given chat."""
id: ChatID
type: str
title: Optional[str] = None
username: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
all_members_are_administrators: Optional[bool] = None
photo: Optional[ChatPhoto] = None
description: Optional[str] = None
invite_link: Optional[str] = None
pinned_message: Optional[Message] = None
permissions: Optional[ChatPermissions] = None
slow_mode_delay: Optional[int] = None
sticker_set_name: Optional[str] = None
can_set_sticker_set: Optional[bool] = None
@dataclass(frozen=True)
class MessageEntity:
"""An entity inside a message (hashtags, links...)"""
type: str
offset: int
length: int
url: Optional[str] = None
user: Optional[User] = None
language: Optional[str] = None
@dataclass(frozen=True)
class PhotoSize:
"""Data about an image size both in pixels and bytes."""
file_id: FileID
file_unique_id: FileID
width: int
height: int
file_size: Optional[int] = None
@dataclass(frozen=True)
class Audio:
"""Metadata about an audio message."""
file_id: FileID
file_unique_id: FileID
duration: int
performer: Optional[str] = None
title: Optional[str] = None
mime_type: Optional[str] = None
file_size: Optional[int] = None
thumb: Optional[PhotoSize] = None
@dataclass(frozen=True)
class Document:
"""Metadata about a generic file."""
file_id: FileID
file_unique_id: FileID
thumb: Optional[PhotoSize] = None
file_name: Optional[str] = None
mime_type: Optional[str] = None
file_size: Optional[int] = None
@dataclass(frozen=True)
class Animation:
"""Metadata about a message with an animation (gif, mp4)."""
file_id: FileID
file_unique_id: FileID
width: int
height: int
duration: int
thumb: Optional[PhotoSize] = None
file_name: Optional[str] = None
mime_type: Optional[str] = None
file_size: Optional[int] = None
@dataclass(frozen=True)
class Game:
"""Data about a Telegram Game."""
title: str
description: str
photo: List[PhotoSize]
text: Optional[str] = None
text_entities: Optional[List[MessageEntity]] = None
animation: Optional[Animation] = None
@dataclass(frozen=True)
class MaskPosition:
"""Information about where to put a mask on a face."""
point: str
x_shift: float
y_shift: float
scale: float
@dataclass(frozen=True)
class Sticker:
"""Metadata about a given sticker."""
file_id: FileID
width: int
height: int
thumb: Optional[PhotoSize] = None
emoji: Optional[str] = None
set_name: Optional[str] = None
mask_position: Optional[MaskPosition] = None
file_size: Optional[int] = None
@dataclass(frozen=True)
class Video:
"""Metadata about a video message."""
file_id: FileID
file_unique_id: FileID
width: int
height: int
duration: int
thumb: Optional[PhotoSize] = None
mime_type: Optional[str] = None
file_size: Optional[int] = None
@dataclass(frozen=True)
class Voice:
"""Metadata about a voice message."""
file_id: FileID
file_unique_id: FileID
duration: int
mime_type: Optional[str] = None
file_size: Optional[int] = None
@dataclass(frozen=True)
class VideoNote:
"""Metadata on a video note."""
file_id: FileID
file_unique_id: FileID
length: int
duration: int
thumb: Optional[PhotoSize] = None
file_size: Optional[int] = None
@dataclass(frozen=True)
class Contact:
"""Representation of a contact."""
phone_number: str
first_name: str
last_name: Optional[str] = None
user_id: Optional[UserID] = None
vcard: Optional[str] = None
@dataclass(frozen=True)
class Location:
"""A GPS location."""
longitude: float
latitude: float
@dataclass(frozen=True)
class Venue:
"""A venue on Foursquare."""
location: Location
title: str
address: str
foursquare_id: Optional[str] = None
foursquare_type: Optional[str] = None
@dataclass(frozen=True)
class PollOption:
"""Option/Answer for a Poll"""
text: str
voter_count: int
@dataclass(frozen=True)
class PollAnswer:
"""An answer of a user in a non-anonymous poll."""
poll_id: str
user: User
option_ids: List[int]
class PollType(Enum):
"""The type of a poll."""
QUIZ = 'quiz'
REGULAR = 'regular'
@dataclass(frozen=True)
class Poll:
"""Representation of a Poll."""
id: PollID
question: str
options: List[PollOption]
total_voter_count: int
is_closed: bool
is_anonymous: bool
type: str
allows_multiple_answers: bool
correct_option_id: Optional[int] = None
explanation: Optional[str] = None
explanation_entities: Optional[List[MessageEntity]] = None
open_period: Optional[int] = None
close_date: Optional[int] = None
class DiceEmoji(Enum):
"""Supported emojis for sending Dice messages."""
DICE = '🎲'
DART = '🎯'
BASKETBALL = '🏀'
@dataclass(frozen=True)
class Dice:
"""Representation of a Dice"""
emoji: str
value: int
@dataclass(frozen=True)
class UserProfilePhotos:
"""A user's profile pictures."""
total_count: int
photos: List[List[PhotoSize]]
@dataclass(frozen=True)
class File:
"""A file ready to be downloaded."""
file_id: FileID
file_unique_id: FileID
file_size: Optional[int]
file_path: Optional[str]
@dataclass(frozen=True)
class Invoice:
"""A billing invoice."""
title: str
description: str
start_parameter: str
currency: str
total_amount: int
@dataclass(frozen=True)
class ShippingAddress:
"""An address for online purchases."""
country_code: str
state: str
city: str
street_line1: str
street_line2: str
post_code: str
@dataclass(frozen=True)
class OrderInfo:
"""Information about an order."""
name: Optional[str] = None
phone_number: Optional[str] = None
email: Optional[str] = None
shipping_address: Optional[ShippingAddress] = None
@dataclass(frozen=True)
class SuccessfulPayment:
"""Confirmation data for a successful payment."""
currency: str
total_amount: int
invoice_payload: str
telegram_payment_charge_id: str
provider_payment_charge_id: str
shipping_option_id: Optional[str] = None
order_info: Optional[OrderInfo] = None
@dataclass(frozen=True)
class PassportData:
"""Information about Telegram Passport data shared with the bot by the user."""
data: List[EncryptedPassportElement]
credentials: EncryptedCredentials
@dataclass(frozen=True)
class PassportFile:
"""A file uploaded to Telegram Passport.
Currently all Telegram Passport files are in JPEG format when decrypted
and don't exceed 10MB.
"""
file_id: str
file_unique_id: str
file_size: int
file_date: int
@dataclass(frozen=True)
class EncryptedPassportElement:
"""Information about Telegram Passport elements shared with the bot by the user."""
type: str
hash: str
data: Optional[str] = None
phone_number: Optional[str] = None
email: Optional[str] = None
files: Optional[List[PassportFile]] = None
front_side: Optional[PassportFile] = None
reverse_side: Optional[PassportFile] = None
selfie: Optional[PassportFile] = None
translation: Optional[List[PassportFile]] = None
@dataclass(frozen=True)
class EncryptedCredentials:
"""Data required for decrypting and authenticating EncryptedPassportElement.
See the Telegram Passport Documentation for a complete description of the
data decryption and authentication processes.
"""
data: str
hash: str
secret: str
@dataclass(frozen=True)
class InlineKeyboardMarkup:
"""Represents an inline keyboard that appears next to the message it belongs to."""
inline_keyboard: List[List[InlineKeyboardButton]]
@dataclass(frozen=True)
class InlineKeyboardButton:
"""One button of an inline keyboard.
You must use exactly one of the optional fields.
"""
text: str
url: Optional[str] = None
login_url: Optional[LoginUrl] = None
callback_data: Optional[str] = None
switch_inline_query: Optional[str] = None
switch_inline_query_current_chat: Optional[str] = None
callback_game: Optional[CallbackGame] = None
pay: Optional[bool] = None
@dataclass(frozen=True)
class CallbackGame:
"""A placeholder, currently holds no information."""
@dataclass(frozen=True)
class _MessageBase:
    """Base data for a Telegram Message.

    This class is made this way to permit
    MessageWithNoReply and Message to have no inheritance relationship
    """

    message_id: MessageID
    date: int
    chat: Chat
    # Serialized as 'from' on the wire ('from' is a Python keyword, hence the
    # trailing underscore plus the 'rename' metadata used by the deserializer).
    from_: Optional[User] = field(metadata={'rename': 'from'})
    forward_from: Optional[User] = None
    forward_from_chat: Optional[Chat] = None
    forward_from_message_id: Optional[int] = None
    forward_signature: Optional[str] = None
    forward_sender_name: Optional[str] = None
    forward_date: Optional[int] = None
    via_bot: Optional[User] = None
    edit_date: Optional[int] = None
    media_group_id: Optional[str] = None
    author_signature: Optional[str] = None
    text: Optional[str] = None
    entities: Optional[List[MessageEntity]] = None
    caption_entities: Optional[List[MessageEntity]] = None
    audio: Optional[Audio] = None
    document: Optional[Document] = None
    animation: Optional[Animation] = None
    game: Optional[Game] = None
    photo: Optional[List[PhotoSize]] = None
    sticker: Optional[Sticker] = None
    video: Optional[Video] = None
    voice: Optional[Voice] = None
    video_note: Optional[VideoNote] = None
    caption: Optional[str] = None
    contact: Optional[Contact] = None
    location: Optional[Location] = None
    venue: Optional[Venue] = None
    poll: Optional[Poll] = None
    dice: Optional[Dice] = None
    new_chat_members: Optional[List[User]] = None
    left_chat_member: Optional[User] = None
    new_chat_title: Optional[str] = None
    new_chat_photo: Optional[List[PhotoSize]] = None
    delete_chat_photo: Optional[bool] = None
    group_chat_created: Optional[bool] = None
    supergroup_chat_created: Optional[bool] = None
    # NOTE(review): Telegram's Bot API names this field 'channel_chat_created';
    # 'chat_channel_created' looks transposed and would then never be populated
    # from wire data — confirm against the deserializer before renaming.
    chat_channel_created: Optional[bool] = None
    migrate_to_chat_id: Optional[int] = None
    migrate_from_chat_id: Optional[int] = None
    invoice: Optional[Invoice] = None
    successful_payment: Optional[SuccessfulPayment] = None
    connected_website: Optional[str] = None
    passport_data: Optional[PassportData] = None
    reply_markup: Optional[InlineKeyboardMarkup] = None
@dataclass(frozen=True)
class MessageWithNoReply(_MessageBase):
""" A Message object without reply_to_message and pinned_message.
This class exists to satisfy a particular specifity on
reply_to_message and pinned_message arguments on the Message class {
Type Message, note that the Message object in this field will not
contain further reply_to_message fields even if it itself is a reply.
}
"""
@dataclass(frozen=True)
class Message(_MessageBase):
"""Data for a Telegram message."""
reply_to_message: Optional[MessageWithNoReply] = None
pinned_message: Optional[MessageWithNoReply] = None
@dataclass(frozen=True)
class InlineQuery:
"""An incoming inline query."""
id: str
query: str
offset: str
from_: User = field(metadata={'rename': 'from'})
location: Optional[Location] = None
@dataclass(frozen=True)
class ChosenInlineResult:
"""The result of an inline query that was chosen by the user."""
result_id: str
query: str
from_: User = field(metadata={'rename': 'from'})
location: Optional[Location] = None
inline_message_id: Optional[str] = None
@dataclass(frozen=True)
class CallbackQuery:
"""An incoming callback from an inline keyboard."""
id: CallbackQueryID
from_: User = field(metadata={'rename': 'from'})
message: Optional[Message] = None
inline_message_id: Optional[str] = None
chat_instance: Optional[str] = None
data: Optional[str] = None
game_short_name: Optional[str] = None
@dataclass(frozen=True)
class ShippingQuery:
    """An incoming shipping query."""

    id: str
    # Fixed typo: was 'invoide_payload'. Telegram sends 'invoice_payload', so
    # the misspelled required field could never be populated and deserialization
    # of a ShippingQuery could never succeed.
    invoice_payload: str
    shipping_address: ShippingAddress
    # Serialized as 'from' on the wire ('from' is a Python keyword).
    from_: User = field(metadata={'rename': 'from'})
@dataclass(frozen=True)
class PreCheckoutQuery:
"""An incoming pre-checkout query."""
id: str
currency: str
total_amount: int
invoice_payload: str
from_: User = field(metadata={'rename': 'from'})
shipping_option_id: Optional[str] = None
order_info: Optional[OrderInfo] = None
@dataclass(frozen=True)
class Update:
"""An update for the bot.
At most one of the optional parameters can be present.
"""
update_id: int
message: Optional[Message] = None
edited_message: Optional[Message] = None
channel_post: Optional[Message] = None
edited_channel_post: Optional[Message] = None
inline_query: Optional[InlineQuery] = None
chosen_inline_result: Optional[ChosenInlineResult] = None
callback_query: Optional[CallbackQuery] = None
shipping_query: Optional[ShippingQuery] = None
pre_checkout_query: Optional[PreCheckoutQuery] = None
@dataclass(frozen=True)
class BotCommand:
"""This object represents a bot command."""
command: str
description: str
@dataclass(frozen=True)
class ResponseParameters:
"""Information about why a request was unsuccessful."""
migrate_to_chat_id: Optional[int]
retry_after: Optional[int]
@dataclass(frozen=True)
class InputMediaPhoto:
"""A photo to be sent."""
media: InputFile
caption: Optional[str] = None
parse_mode: Optional[str] = None
type: Literal['photo'] = field(default='photo', init=False)
@dataclass(frozen=True)
class InputMediaVideo:
"""The content of a media message to be sent.
Can be of type: Animation, Document, Audio, Photo and Video.
"""
media: InputFile
thumb: Optional[Union[URL, FileID]] = None
caption: Optional[str] = None
parse_mode: Optional[str] = None
width: Optional[int] = None
height: Optional[int] = None
duration: Optional[int] = None
supports_streaming: Optional[bool] = None
type: Literal['video'] = field(default='video', init=False)
@dataclass(frozen=True)
class InputMediaAnimation:
    """An animation (GIF or soundless H.264/MPEG-4 AVC video) to be sent as
    part of a media message."""

    media: InputFile
    thumb: Optional[Union[URL, FileID]] = None
    caption: Optional[str] = None
    parse_mode: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    duration: Optional[int] = None
    # NOTE(review): 'performer' is not part of Telegram's InputMediaAnimation
    # (it belongs to InputMediaAudio) — likely copy-paste; confirm before removing.
    performer: Optional[str] = None
    # Discriminator tag used by the Bot API to identify the media kind.
    type: Literal['animation'] = field(default='animation', init=False)
@dataclass(frozen=True)
class InputMediaAudio:
    """An audio file to be sent as part of a media message."""

    media: InputFile
    thumb: Optional[Union[URL, FileID]] = None
    caption: Optional[str] = None
    parse_mode: Optional[str] = None
    duration: Optional[int] = None
    performer: Optional[str] = None
    title: Optional[str] = None
    # Discriminator tag used by the Bot API to identify the media kind. Must be
    # 'audio' for this class — the previous Literal['animation']/'animation'
    # was a copy-paste bug that made Telegram treat audio uploads as animations.
    type: Literal['audio'] = field(default='audio', init=False)
@dataclass(frozen=True)
class InputMediaDocument:
"""A photo to be sent."""
media: InputFile
caption: Optional[str] = None
parse_mode: Optional[str] = None
thumb: Optional[Union[URL, FileID]] = None
type: Literal['document'] = field(default='document', init=False)
InputMedia = Union[
InputMediaAnimation,
InputMediaAudio,
InputMediaDocument,
InputMediaPhoto,
InputMediaVideo,
]
class ParseMode(Enum):
"""Parse mode for applying markup to text messages."""
MARKDOWN = 'Markdown'
HTML = 'HTML'
@dataclass(frozen=True)
class LoginUrl:
"""A parameter used to automatically authorize a user.
Used in inline keyboard buttons.
"""
url: str
forward_text: Optional[str] = None
bot_username: Optional[str] = None
request_write_access: Optional[bool] = None
@dataclass(frozen=True)
class KeyboardButtonPollType:
"""The type of a poll.
The poll is allowed to be created and sent when the corresponding button is
pressed.
"""
type: str
@dataclass(frozen=True)
class KeyboardButton:
"""One button of the reply keyboard.
At most one of the optional parameters can be present.
"""
text: str
request_contact: Optional[bool] = None
request_location: Optional[bool] = None
request_poll: Optional[KeyboardButtonPollType] = None
@dataclass(frozen=True)
class ReplyKeyboardMarkup:
"""A custom keyboard with reply options.
At most one of the optional parameters can be present.
"""
keyboard: List[List[KeyboardButton]]
resize_keyboard: Optional[bool] = None
one_time_keyboard: Optional[bool] = None
selective: Optional[bool] = None
@dataclass(frozen=True)
class ReplyKeyboardRemove:
"""Request for a client to remove the custom current keyboard."""
remove_keyboard: bool = field(default=True, init=False)
selective: Optional[bool] = None
@dataclass(frozen=True)
class ForceReply:
"""Request for a client to display a reply interface to the user."""
force_reply: bool = field(default=True, init=False)
selective: Optional[bool] = None
ReplyMarkup = Union[
InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply
]
class ChatAction(Enum):
"""Actions to display to show that the bot is doing something before sending."""
TYPING = 'typing'
UPLOAD_PHOTO = 'upload_photo'
RECORD_VIDEO = 'record_video'
UPLOAD_VIDEO = 'upload_video'
RECORD_AUDIO = 'record_audio'
UPLOAD_AUDIO = 'upload_audio'
UPLOAD_DOCUMENT = 'upload_document'
FIND_LOCATION = 'find_location'
RECORD_VIDEO_NOTE = 'record_video_note'
UPLOAD_VIDEO_NOTE = 'upload_video_note'
# Public API of this module, kept in strict alphabetical order (the original
# list had 'InputFile', 'ReplyMarkup' and the 'Message*' entries out of order).
__all__ = [
    'Animation',
    'Audio',
    'BotCommand',
    'BotUser',
    'CallbackGame',
    'CallbackQuery',
    'CallbackQueryID',
    'Chat',
    'ChatAction',
    'ChatID',
    'ChatMember',
    'ChatPermissions',
    'ChatPhoto',
    'ChosenInlineResult',
    'Contact',
    'Dice',
    'DiceEmoji',
    'Document',
    'EncryptedCredentials',
    'EncryptedPassportElement',
    'File',
    'FileDescription',
    'FileID',
    'ForceReply',
    'Game',
    'InlineKeyboardButton',
    'InlineKeyboardMarkup',
    'InlineMessageID',
    'InlineQuery',
    'InputFile',
    'InputMedia',
    'InputMediaAnimation',
    'InputMediaAudio',
    'InputMediaDocument',
    'InputMediaPhoto',
    'InputMediaVideo',
    'Invoice',
    'KeyboardButton',
    'KeyboardButtonPollType',
    'Location',
    'LoginUrl',
    'MaskPosition',
    'Message',
    'MessageEntity',
    'MessageID',
    'MessageWithNoReply',
    'OrderInfo',
    'ParseMode',
    'PassportData',
    'PassportFile',
    'PhotoSize',
    'Poll',
    'PollAnswer',
    'PollID',
    'PollOption',
    'PollType',
    'PreCheckoutQuery',
    'ReplyKeyboardMarkup',
    'ReplyKeyboardRemove',
    'ReplyMarkup',
    'ResponseParameters',
    'ShippingAddress',
    'ShippingQuery',
    'Sticker',
    'SuccessfulPayment',
    'Token',
    'Update',
    'User',
    'UserID',
    'UserProfilePhotos',
    'Venue',
    'Video',
    'VideoNote',
    'Voice',
]
from dataclasses import asdict, fields, is_dataclass
from enum import Enum
from typing import (
Any,
Dict,
List,
Optional,
Type,
TypeVar,
Union,
cast,
get_type_hints,
overload,
)
from typing_extensions import Literal
from typing_inspect import get_args, get_origin, is_optional_type
from .error import RobotoError
from .typing_util import is_new_type, is_none_type, original_type, type_name
T = TypeVar('T')
JSONPrimitives = Optional[Union[int, float, str, bool]]
JSONLike = Union[JSONPrimitives, Dict[str, Any], List[Any]]
def renames(cls: Type[T]) -> Dict[str, str]:
    """Get all serialization renames from a dataclass.

    `cls` is expected to be a Dataclass type.

    Args:
        cls: A dataclass type.

    Return:
        A mapping of wire names (taken from each field's 'rename' metadata) to
        the attribute names that should receive them when deserializing.
    """
    mapping: Dict[str, str] = {}
    for fld in fields(cls):
        if 'rename' in fld.metadata:
            mapping[fld.metadata['rename']] = fld.name
    return mapping
def from_list(tp: Type[List[T]], v: List[Any]) -> List[T]:
    """Transform a list of JSON-like structures into JSON-compatible objects."""
    # tp is expected to be exactly List[<element>], so get_args yields one type.
    element_type, = get_args(tp)
    return [_from_json_like(element_type, element) for element in v]
def from_dict(tp: Type[T], v: Dict[str, Any]) -> T:
    """Transform a JSON-like dict into an instance of dataclass ``tp``.

    Keys listed in field 'rename' metadata are mapped back to their attribute
    names; keys with no matching field are silently ignored.
    """
    field_renames = renames(tp)
    type_hints = get_type_hints(tp)

    # Work on a shallow copy: the original implementation popped/renamed keys
    # directly in `v`, mutating the caller's dict as a side effect.
    data = dict(v)
    for wire_name, attr_name in field_renames.items():
        if wire_name in data:
            data[attr_name] = data.pop(wire_name)

    return tp(  # type: ignore
        **{
            field_name: _from_json_like(type_hints[field_name], value)
            for field_name, value in data.items()
            if field_name in type_hints
        }
    )
class JSONConversionError(RobotoError):
    """Signal error trying to read JSON-like data into a given type."""

    def __init__(self, message, schema_class, value):
        super().__init__(message)
        # The target type that could not be produced.
        self.schema_class = schema_class
        # The offending JSON-like value.
        self.value = value
def convert_single(tp: Type[T], v: Any) -> T:
    """Convert a value into a single (non-list) type."""
    # Numbers: accept either int or float input and coerce to the target type.
    if tp in (int, float):
        if isinstance(v, (int, float)):
            return tp(v)  # type: ignore
        raise JSONConversionError(f'Cannot read value {v} as a number.', tp, v)
    # Dataclasses: recursively build the instance from a dict payload.
    if is_dataclass(tp):
        if isinstance(v, dict):
            return from_dict(tp, v)
        raise JSONConversionError(
            f'Cannot read non-dict {v} as dataclass type {tp}.', tp, v,
        )
    # Everything else: a plain runtime isinstance check, unwrapping NewType
    # down to its underlying runtime type first.
    real_type = original_type(tp) if is_new_type(tp) else tp
    if isinstance(v, real_type):
        return cast(T, v)
    raise JSONConversionError(
        f'Cannot find any way to read value {v} as {tp}.', tp, v
    )
def _from_json_like(type_hint, value):
    """Dispatch `value` to `from_json_like`, unwrapping an Optional hint."""
    is_opt = is_optional_type(type_hint)
    if is_opt:
        # Strip NoneType out of the Optional[...] union; exactly one real
        # type is expected to remain.
        (unwrapped,) = tuple(
            arg for arg in get_args(type_hint) if not is_none_type(arg)
        )
    else:
        unwrapped = type_hint
    if unwrapped is Any:
        # `Any` means: accept the value verbatim.
        return value
    return from_json_like(unwrapped, value, is_opt)
# The four overloads below exist only for static typing: a List[...] target
# yields a list, and `optional=True` widens the result with None.  The stub
# bodies never execute (see PEP 484 `@overload`).
@overload
def from_json_like(
    tp: Type[List[T]], value: List[JSONLike], optional: Literal[True],
) -> Optional[List[T]]:  # pragma: no cover
    """Overload for from_json_like, refer to implementation."""
    ...


@overload
def from_json_like(
    tp: Type[T], value: JSONLike, optional: Literal[True],
) -> Optional[T]:  # pragma: no cover
    """Overload for from_json_like, refer to implementation."""
    ...


@overload
def from_json_like(
    tp: Type[List[T]], value: List[JSONLike], optional: Literal[False] = False,
) -> List[T]:  # pragma: no cover
    """Overload for from_json_like, refer to implementation."""
    ...


@overload
def from_json_like(
    tp: Type[T], value: JSONLike, optional: Literal[False] = False,
) -> T:  # pragma: no cover
    """Overload for from_json_like, refer to implementation."""
    ...
def from_json_like(tp: Type[T], value: Any, optional: bool = False) -> Optional[T]:
    """Read a JSON-like object into a given schema type.

    `tp` must be:
    - a JSON primitive type (int, float, str, bool or NoneType),
    - a List[T] of a JSON-compatible type, or
    - a dataclass where every field is of a JSON-compatible type

    Args:
        tp: A JSON-compatible type.
        value: A JSON-compatible value to read.
        optional: Whether None should be accepted.

    Returns:
        An object of the type given by `tp`, or maybe None if `optional` is `True`.
    """
    # None is only acceptable when the target is explicitly optional.
    if value is None:
        if optional:
            return None
        raise JSONConversionError(
            'Cannot read None as a non optional value.', tp, value
        )
    # Non-list targets are handled by the scalar/dataclass converter.
    if get_origin(tp) is not list:
        return convert_single(tp, value)
    if not isinstance(value, list):
        raise JSONConversionError(
            'Cannot read non-list value to a list type.', tp, value
        )
    return from_list(tp, value)  # type: ignore
def to_json_like(obj: Any) -> JSONLike:
    """Serialize an object to a JSON-compatible representation.

    `obj` must be:
    - a JSON primitive (int, float, str, bool or None),
    - an object of a dataclass where every field is JSON-compatible, or
    - a list of JSON-compatible objects.

    Args:
        obj: The object to serialize.

    Returns:
        A representation that can be converted to JSON.
    """
    # Primitives pass through untouched.  This check runs first, so
    # subclasses of the primitives are also returned as-is.
    if isinstance(obj, get_args(JSONPrimitives)):  # type: ignore
        return obj
    # Mappings and dataclasses become JSON objects; None-valued entries are
    # dropped so unset optional fields are simply omitted from the payload.
    if isinstance(obj, dict):
        return {
            key: to_json_like(item) for key, item in obj.items() if item is not None
        }
    if is_dataclass(obj):
        return {
            key: to_json_like(item)
            for key, item in asdict(obj).items()
            if item is not None
        }
    if isinstance(obj, list):
        return [to_json_like(item) for item in obj]
    if isinstance(obj, Enum):
        # Enums serialize through their underlying value.
        return to_json_like(obj.value)
    obj_type = type(obj)
    raise JSONConversionError(
        f'Failed to turn value of type {type_name(obj_type)} into a JSONLike.',
        obj_type,
        obj,
    )
from collections import ChainMap
from dataclasses import dataclass
from enum import Enum
from typing import Any, Awaitable, Callable, Dict, Optional, Union
from asks import Session
from asks.multipart import MultipartData
from asks.response_objects import Response
from typing_extensions import Literal, Protocol
from .api_types import FileDescription
from .datautil import from_json_like, to_json_like
from .error import BotAPIError
class APIResult(Protocol):
    """API success response."""

    ok: Literal[True]
    result: Any = None


class APIError(Protocol):
    """API error response."""

    ok: Literal[False]
    error_code: int
    description: str


# Discriminated union: the `ok` flag tells a success (APIResult) apart from a
# failure (APIError).
APIResponse = Union[APIResult, APIError]
@dataclass(frozen=True)
class AnyAPIResponse:
    """API Response format.

    Concrete carrier for either shape of response: `result` is set on
    success, `error_code`/`description` on failure.
    """

    ok: bool
    result: Optional[Any] = None
    error_code: Optional[int] = None
    description: Optional[str] = None
class HTTPMethod(Enum):
    """HTTP Methods supported by the request helpers below."""

    GET = 'get'
    POST = 'post'
    PUT = 'put'
def validate_response(response: APIResponse) -> Any:
    """Validate a Telegram Bot API Response.

    Args:
        response: A Telegram Bot API response to validate.

    Returns:
        The contents of the response if it response.ok is true.

    Raises:
        BotAPIError: If response.ok is false.
    """
    # Success path first; a failed response carries the error details.
    if response.ok:
        return response.result
    raise BotAPIError(
        response.error_code, response.description,
    )
async def _json_request(
    session: Session, method: HTTPMethod, api_method: str, body: Any = None
) -> Response:
    # Serialize `body` into its JSON-like form and send it to the given
    # Telegram API method path using the given HTTP verb.
    return await session.request(method.value, path=api_method, json=to_json_like(body))
def _to_multipart_compatible(value: Any) -> Any:
    """Transform values into multipart/form-data compatible versions."""
    # File payloads must be wrapped so the multipart encoder knows the
    # binary source, MIME type and filename; everything else is sent as-is.
    if isinstance(value, FileDescription):
        return MultipartData(value.binary_source, value.mime_type, value.basename)
    return value
async def _multipart_request_from_dict(
    session: Session, method: HTTPMethod, api_method: str, body: Dict[str, Any]
) -> Response:
    """Send `body` as multipart/form-data, dropping unset (None) fields."""
    form_fields = {
        name: _to_multipart_compatible(field)
        for name, field in body.items()
        if field is not None
    }
    return await session.request(method.value, path=api_method, multipart=form_fields)
async def _multipart_request(
    session: Session, method: HTTPMethod, api_method: str, body: Any
) -> Response:
    """Send an object's attributes as a multipart/form-data request."""
    return await _multipart_request_from_dict(
        session, method, api_method, vars(body)
    )
# Common signature shared by `_json_request` and `_multipart_request`.
APIRequester = Callable[[Session, HTTPMethod, str, Any], Awaitable[Response]]
async def _make_request(
    requester: APIRequester,
    session: Session,
    method: HTTPMethod,
    api_method: str,
    body: Any = None,
) -> Any:
    """Perform a request through `requester` and validate the API response."""
    raw = await requester(session, method, api_method, body)
    # We know that the server ensures the object will follow either protocol,
    # but mypy can't see that.
    parsed: Any = from_json_like(AnyAPIResponse, raw.json())
    return validate_response(parsed)
async def make_request(
    session: Session, method: HTTPMethod, api_method: str, body: Any = None
) -> Any:
    """Basic request function for the telegram API

    Args:
        session: An `asks.Session` object with the correct `base_location` and
            `endpoint` set up.
        method: The HTTP method to use.
        api_method: The Telegram API method to call.
        body: An object to send as JSON.  None-valued fields are omitted.

    Returns:
        The APIResponse contents if everything went right.

    Raises:
        BotAPIError: If response.ok is false.
    """
    return await _make_request(_json_request, session, method, api_method, body)
async def make_multipart_request(session: Session, api_method: str, body: Any) -> Any:
    """Function for doing POST multipart/form-data requests.

    Useful for requests that send files.

    Args:
        session: An `asks.Session` object with the correct `base_location` and
            `endpoint` set up.
        api_method: The Telegram API method to call.
        body: An object whose attributes are sent as multipart form fields.

    Returns:
        The APIResponse contents if everything went right.

    Raises:
        BotAPIError: If response.ok is false.
    """
    return await _make_request(
        _multipart_request, session, HTTPMethod.POST, api_method, body
    )
async def make_multipart_request_with_attachments(
    session: Session,
    api_method: str,
    body: Any,
    attachments: Dict[str, FileDescription],
) -> Any:
    """Function for doing POST multipart/form-data requests with attachments.

    Attachments are defined by Telegram API.

    Useful for requests that send files.

    Args:
        session: An `asks.Session` object with the correct `base_location` and
            `endpoint` set up.
        api_method: The Telegram API method to call.
        body: An object whose attributes are sent as multipart form fields.
        attachments: Extra file parts to include in the form, keyed by the
            field name the body refers to them with.

    Returns:
        The APIResponse contents if everything went right.

    Raises:
        BotAPIError: If response.ok is false.
    """
    # ChainMap gives the body's own fields precedence over attachments when
    # a key appears in both.
    fields = dict(ChainMap(body.__dict__, attachments))
    content = await _multipart_request_from_dict(
        session, HTTPMethod.POST, api_method, body=fields,
    )
    response: Any = from_json_like(AnyAPIResponse, content.json())
    return validate_response(response)
import json
from dataclasses import dataclass
from typing import Generic, List, Optional, TypeVar, Union, cast
from .api_types import (
BotCommand,
CallbackQueryID,
ChatAction,
ChatID,
ChatPermissions,
DiceEmoji,
FileID,
InlineKeyboardMarkup,
InlineMessageID,
InputFile,
InputMedia,
InputMediaPhoto,
InputMediaVideo,
MessageID,
ParseMode,
PollType,
ReplyMarkup,
UserID,
)
from .datautil import to_json_like
T = TypeVar('T')


class JSONSerialized(str, Generic[T]):
    """Strong type for the JSON serialized version of a type.

    At runtime this is just a `str`; the phantom type parameter records
    which type the string was serialized from.
    """
def json_serialize(value: T) -> JSONSerialized[T]:
    """Serialize value to its strong-typed JSON string type."""
    serialized = json.dumps(to_json_like(value))
    return cast(JSONSerialized[T], serialized)
def maybe_json_serialize(value: Optional[T]) -> Optional[JSONSerialized[T]]:
    """Serialize value to its strong-typed JSON string type, passing None through."""
    return None if value is None else json_serialize(value)
# NOTE: Each request dataclass below mirrors the parameters of one Telegram
# Bot API method.  Fields left as None are omitted from the serialized
# payload (datautil.to_json_like drops None-valued entries), and fields
# typed JSONSerialized[...] must be pre-serialized with `json_serialize`.
@dataclass(frozen=True)
class GetUpdatesRequest:
    """Parameters for getting updates for a bot."""

    offset: Optional[int] = None
    limit: Optional[int] = None
    timeout: Optional[int] = None
    allowed_updates: Optional[List[str]] = None


@dataclass(frozen=True)
class SendMessageRequest:
    """Parameters for sending a message."""

    chat_id: Union[ChatID, str]
    text: str
    parse_mode: Optional[ParseMode] = None
    disable_web_page_preview: Optional[bool] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None
# --- Media-sending request payloads ---
@dataclass(frozen=True)
class ForwardMessageRequest:
    """Parameters for forwarding a message."""

    chat_id: Union[ChatID, str]
    from_chat_id: Union[ChatID, str]
    message_id: MessageID
    disable_notification: Optional[bool] = None


@dataclass(frozen=True)
class SendPhotoRequest:
    """Parameters for sending a photo."""

    chat_id: Union[ChatID, str]
    photo: InputFile
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendAudioRequest:
    """Parameters for sending an audio."""

    chat_id: Union[ChatID, str]
    audio: InputFile
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    duration: Optional[int] = None
    performer: Optional[str] = None
    title: Optional[str] = None
    thumb: Optional[InputFile] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendDocumentRequest:
    """Parameters for sending a document."""

    chat_id: Union[ChatID, str]
    document: InputFile
    thumb: Optional[InputFile] = None
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendVideoRequest:
    """Parameters for sending a video."""

    chat_id: Union[ChatID, str]
    video: InputFile
    duration: Optional[int] = None
    width: Optional[int] = None
    height: Optional[int] = None
    thumb: Optional[InputFile] = None
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    supports_streaming: Optional[bool] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendAnimationRequest:
    """Parameters for sending an animation."""

    chat_id: Union[ChatID, str]
    animation: InputFile
    duration: Optional[int] = None
    width: Optional[int] = None
    height: Optional[int] = None
    thumb: Optional[InputFile] = None
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendVoiceRequest:
    """Parameters for sending a voice note (OGG/OPUS audio)."""

    chat_id: Union[ChatID, str]
    voice: InputFile
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    duration: Optional[int] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendVideoNoteRequest:
    """Parameters for sending a video note (rounded square mp4 videos)."""

    chat_id: Union[ChatID, str]
    video_note: InputFile
    duration: Optional[int] = None
    length: Optional[int] = None
    thumb: Optional[InputFile] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None
# --- Album, location, venue, contact and poll request payloads ---
@dataclass(frozen=True)
class SendMediaGroupRequest:
    """Parameters for sending a group of photos or videos as an album."""

    chat_id: Union[ChatID, str]
    media: JSONSerialized[List[Union[InputMediaPhoto, InputMediaVideo]]]
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None


@dataclass(frozen=True)
class SendLocationRequest:
    """Parameters for sending a point on the map."""

    chat_id: Union[ChatID, str]
    latitude: float
    longitude: float
    live_period: Optional[int] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditMessageLiveLocationRequest:
    """Parameters for editing a live location non-inline message."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    latitude: float
    longitude: float
    reply_markup: Optional[JSONSerialized[InlineKeyboardMarkup]] = None


@dataclass(frozen=True)
class EditInlineMessageLiveLocationRequest:
    """Parameters for editing a live location inline message."""

    inline_message_id: InlineMessageID
    latitude: float
    longitude: float
    reply_markup: Optional[JSONSerialized[InlineKeyboardMarkup]] = None


@dataclass(frozen=True)
class StopMessageLiveLocationRequest:
    """Parameters for stopping a live location non-inline message."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    reply_markup: Optional[JSONSerialized[InlineKeyboardMarkup]] = None


@dataclass(frozen=True)
class StopInlineMessageLiveLocationRequest:
    """Parameters for stopping a live location inline message."""

    inline_message_id: InlineMessageID
    reply_markup: Optional[JSONSerialized[InlineKeyboardMarkup]] = None


@dataclass(frozen=True)
class SendVenueRequest:
    """Parameters for sending information about a venue."""

    chat_id: Union[ChatID, str]
    latitude: float
    longitude: float
    title: str
    address: str
    foursquare_id: Optional[str] = None
    foursquare_type: Optional[str] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendContactRequest:
    """Parameters for sending a phone contact."""

    chat_id: Union[ChatID, str]
    phone_number: str
    first_name: str
    last_name: Optional[str] = None
    vcard: Optional[str] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendPollRequest:
    """Parameters for sending a native poll."""

    chat_id: Union[ChatID, str]
    question: str
    options: JSONSerialized[List[str]]
    is_anonymous: Optional[bool] = None
    type: Optional[PollType] = None
    allows_multiple_answers: Optional[bool] = None
    correct_option_id: Optional[int] = None
    explanation: Optional[str] = None
    explanation_parse_mode: Optional[ParseMode] = None
    open_period: Optional[int] = None
    close_date: Optional[int] = None
    is_closed: Optional[bool] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class StopPollRequest:
    """Parameters for stopping a poll sent by the bot."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    reply_markup: Optional[JSONSerialized[InlineKeyboardMarkup]] = None
# --- Chat administration and chat-info request payloads ---
@dataclass(frozen=True)
class SendDiceRequest:
    """Parameters for sending a Dice."""

    chat_id: Union[ChatID, str]
    emoji: Optional[DiceEmoji] = None
    disable_notification: Optional[bool] = None
    reply_to_message_id: Optional[MessageID] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class SendChatActionRequest:
    """Parameters for sending a chat action."""

    chat_id: Union[ChatID, str]
    action: ChatAction


@dataclass(frozen=True)
class GetUserProfilePhotosRequest:
    """Parameters for getting a list of user profile pictures."""

    user_id: UserID
    offset: Optional[int] = None
    limit: Optional[int] = None


@dataclass(frozen=True)
class GetFileRequest:
    """Parameters for getting information to download a file."""

    file_id: FileID


@dataclass(frozen=True)
class KickChatMemberRequest:
    """Parameters for kicking a chat member."""

    chat_id: Union[ChatID, str]
    user_id: UserID
    until_date: Optional[int] = None


@dataclass(frozen=True)
class UnbanChatMemberRequest:
    """Parameters for unbanning a chat member."""

    chat_id: Union[ChatID, str]
    user_id: UserID


@dataclass(frozen=True)
class RestrictChatMemberRequest:
    """Parameters for restricting permissions of a chat member."""

    chat_id: Union[ChatID, str]
    user_id: UserID
    permissions: JSONSerialized[ChatPermissions]
    until_date: Optional[int] = None


@dataclass(frozen=True)
class PromoteChatMemberRequest:
    """Parameters for promoting a chat member to administrator."""

    chat_id: Union[ChatID, str]
    user_id: UserID
    can_change_info: Optional[bool] = None
    can_post_messages: Optional[bool] = None
    can_edit_messages: Optional[bool] = None
    can_delete_messages: Optional[bool] = None
    can_invite_users: Optional[bool] = None
    can_restrict_members: Optional[bool] = None
    can_pin_messages: Optional[bool] = None
    can_promote_members: Optional[bool] = None


@dataclass(frozen=True)
class SetChatAdministratorCustomTitleRequest:
    """Parameters for setting an administrator's custom title."""

    chat_id: Union[ChatID, str]
    user_id: UserID
    custom_title: str


@dataclass(frozen=True)
class SetChatPermissionsRequest:
    """Parameters for setting the default chat permissions."""

    chat_id: Union[ChatID, str]
    permissions: JSONSerialized[ChatPermissions]


@dataclass(frozen=True)
class ExportChatInviteLinkRequest:
    """Parameters for exporting a chat's invite link."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class SetChatPhotoRequest:
    """Parameters for setting a chat's photo."""

    chat_id: Union[ChatID, str]
    photo: InputFile


@dataclass(frozen=True)
class DeleteChatPhotoRequest:
    """Parameters for deleting a chat's photo."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class SetChatTitleRequest:
    """Parameters for setting a chat's title."""

    chat_id: Union[ChatID, str]
    title: str


@dataclass(frozen=True)
class SetChatDescriptionRequest:
    """Parameters for setting a chat's description."""

    chat_id: Union[ChatID, str]
    description: str


@dataclass(frozen=True)
class PinChatMessageRequest:
    """Parameters for pinning a message in a chat."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    disable_notification: Optional[bool] = None


@dataclass(frozen=True)
class UnpinChatMessageRequest:
    """Parameters for unpinning the message from a chat."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class LeaveChatRequest:
    """Parameters for leaving a chat."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class GetChatRequest:
    """Parameters for getting information about a chat."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class GetChatAdministratorsRequest:
    """Parameters for getting the administrators of a chat."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class GetChatMembersCountRequest:
    """Parameters for getting the number of members in a chat."""

    chat_id: Union[ChatID, str]


@dataclass(frozen=True)
class GetChatMemberRequest:
    """Parameters for getting information about a member in a chat."""

    chat_id: Union[ChatID, str]
    user_id: UserID


@dataclass(frozen=True)
class SetChatStickerSetRequest:
    """Parameters for setting the sticker set of a supergroup."""

    chat_id: Union[ChatID, str]
    sticker_set_name: str


@dataclass(frozen=True)
class DeleteChatStickerSetRequest:
    """Parameters for deleting the sticker set of a supergroup."""

    chat_id: Union[ChatID, str]
# --- Callback-query, bot-command and message-editing request payloads ---
@dataclass(frozen=True)
class AnswerCallbackQueryRequest:
    """Parameters for answering a callback query from an inline keyboard."""

    callback_query_id: CallbackQueryID
    text: Optional[str] = None
    show_alert: Optional[bool] = None
    url: Optional[str] = None
    cache_time: Optional[int] = None


@dataclass(frozen=True)
class SetMyCommandsRequest:
    """Parameters for setting a bot's command list."""

    commands: JSONSerialized[List[BotCommand]]


@dataclass(frozen=True)
class EditMessageTextRequest:
    """Parameters for editing the text of a non-inline message."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    text: str
    parse_mode: Optional[ParseMode] = None
    disable_web_page_preview: Optional[bool] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditInlineMessageTextRequest:
    """Parameters for editing the text of an inline message."""

    inline_message_id: InlineMessageID
    text: str
    parse_mode: Optional[ParseMode] = None
    disable_web_page_preview: Optional[bool] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditMessageCaptionRequest:
    """Parameters for editing the caption of a non-inline message."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditInlineMessageCaptionRequest:
    """Parameters for editing the caption of an inline message."""

    inline_message_id: InlineMessageID
    caption: Optional[str] = None
    parse_mode: Optional[ParseMode] = None
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditMessageMediaRequest:
    """Parameters for editing the media of a non-inline message."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    media: JSONSerialized[InputMedia]
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditInlineMessageMediaRequest:
    """Parameters for editing the media of an inline message."""

    inline_message_id: InlineMessageID
    media: JSONSerialized[InputMedia]
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditMessageReplyMarkupRequest:
    """Parameters for editing the reply markup of a non-inline message."""

    chat_id: Union[ChatID, str]
    message_id: MessageID
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None


@dataclass(frozen=True)
class EditInlineMessageReplyMarkupRequest:
    """Parameters for editing the reply markup of an inline message."""

    inline_message_id: InlineMessageID
    reply_markup: Optional[JSONSerialized[ReplyMarkup]] = None
@dataclass(frozen=True)
class DeleteMessageRequest:
    """Parameters for deleting a message."""

    chat_id: Union[ChatID, str]
    # Restored: the original line had dataset-metadata text fused onto this
    # annotation, which made the module unparseable.
    message_id: MessageID
from __future__ import annotations
import logging
import os
from dataclasses import asdict, dataclass, replace
from pathlib import PurePath
from typing import Final, Iterable, Mapping
import toml
from experimental.scie.config import Command, Config, File, Interpreter, LiftConfig
from experimental.scie.subsystems import Science
from experimental.scie.target_types import (
ScieBinaryNameField,
ScieDependenciesField,
ScieLiftSourceField,
SciePlatformField,
)
from pants.backend.python.util_rules.pex_from_targets import (
InterpreterConstraintsRequest,
)
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact, PackageFieldSet
from pants.core.goals.run import RunFieldSet, RunInSandboxBehavior, RunRequest
from pants.core.target_types import EnvironmentAwarePackageRequest
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
)
from pants.engine.fs import (
EMPTY_DIGEST,
CreateDigest,
Digest,
DigestContents,
FileContent,
MergeDigests,
Snapshot,
)
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, MultiGet, Rule, collect_rules, rule, rule_helper
from pants.engine.target import (
DependenciesRequest,
DescriptionField,
FieldSetsPerTarget,
FieldSetsPerTargetRequest,
HydratedSources,
HydrateSourcesRequest,
Targets,
)
from pants.engine.unions import UnionRule
from pants.init.plugin_resolver import InterpreterConstraints
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)

# Default filename under which the generated science lift configuration is
# materialized for the `science lift build` invocation.
DEFAULT_LIFT_PATH: Final[str] = "lift.toml"
@dataclass(frozen=True)
class ScieFieldSet(PackageFieldSet, RunFieldSet):
    """Fields consumed when packaging or running a `scie_binary` target."""

    required_fields = (ScieDependenciesField,)
    run_in_sandbox_behavior = RunInSandboxBehavior.RUN_REQUEST_HERMETIC

    # Output binary name; falls back to the target name (see `scie_binary`).
    binary_name: ScieBinaryNameField
    description: DescriptionField
    dependencies: ScieDependenciesField
    platforms: SciePlatformField
    # Optional user-provided lift.toml overriding the generated config.
    lift: ScieLiftSourceField
@rule_helper
async def _get_interpreter_config(targets: Targets) -> Interpreter:
    """Derive the scie interpreter config from the targets' Python constraints.

    Args:
        targets: The direct dependency targets of the `scie_binary`.

    Returns:
        An `Interpreter` pinned to the minimum compatible Python version.

    Raises:
        ValueError: If no minimum Python version satisfies the constraints.
    """
    # Get the interpreter_constraints for the Pex to determine which version
    # of the Python Standalone to use.
    constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest([tgt.address for tgt in targets]),
    )
    # TODO: Pull the interpreter_universe from somewhere else (Python Build standalone?)
    minimum_version = constraints.minimum_python_version(["3.8", "3.9", "3.10", "3.11"])
    # An `assert` would be stripped under `python -O`, silently passing None
    # into Interpreter; raise explicitly instead.
    if minimum_version is None:
        raise ValueError("No minimum python version found")
    # Create a toml configuration from the input targets and the minimum_version
    return Interpreter(version=minimum_version)
def _get_target_platforms(
platforms: tuple[str, ...] | None,
platform_mapping: Mapping[str, str],
host_platform: Platform,
) -> list[str]:
if platforms:
return list(platforms)
return [platform_mapping.get(host_platform.value, "")]
def _get_files_config(built_packages: Iterable[BuiltPackage]) -> Iterable[File]:
    """Build the lift `files` entries from every packaged artifact path."""
    # Artifacts without a relpath have nothing to ship, so they are skipped.
    return [
        File(str(PurePath(artifact.relpath)))
        for built_pkg in built_packages
        for artifact in built_pkg.artifacts
        if artifact.relpath is not None
    ]
def _contains_pex(built_package: BuiltPackage) -> bool:
return any(
artifact.relpath is not None and artifact.relpath.endswith(".pex")
for artifact in built_package.artifacts
)
@rule_helper
async def _parse_lift_source(source: ScieLiftSourceField) -> Config:
    """Hydrate and parse a user-provided lift TOML file into a `Config`.

    Args:
        source: The `lift` source field of the `scie_binary` target.

    Returns:
        The parsed `Config` built from the file's TOML contents.
    """
    hydrated_source = await Get(HydratedSources, HydrateSourcesRequest(source))
    digest_contents = await Get(DigestContents, Digest, hydrated_source.snapshot.digest)
    content = digest_contents[0].content.decode("utf-8")
    lift_toml = toml.loads(content)
    # Trace the parsed document at debug level; `logger.error` here was a
    # development leftover that polluted the error log.
    logger.debug(lift_toml)
    return Config(**lift_toml)
@rule(level=LogLevel.DEBUG)
async def scie_binary(
    science: Science,
    field_set: ScieFieldSet,
    platform: Platform,
) -> BuiltPackage:
    """Package a `scie_binary` target by running the `science` tool.

    Builds every dependency package, generates (or loads) a lift manifest,
    and invokes `science lift build` to produce one scie executable, or one
    per requested platform.
    """
    # Grab the dependencies of this target, and build them
    direct_deps = await Get(Targets, DependenciesRequest(field_set.dependencies))
    deps_field_sets = await Get(
        FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, direct_deps)
    )
    # NOTE: the loop variable no longer shadows the `field_set` parameter.
    built_packages = await MultiGet(
        Get(BuiltPackage, EnvironmentAwarePackageRequest(dep_field_set))
        for dep_field_set in deps_field_sets.field_sets
    )
    # Split the built packages into .pex and non-.pex packages
    pex_packages = [
        built_pkg for built_pkg in built_packages if _contains_pex(built_pkg)
    ]
    non_pex_packages = [
        built_pkg for built_pkg in built_packages if not _contains_pex(built_pkg)
    ]
    # Ensure that there is exactly 1 .pex file - reduces complexity of this plugin for now
    assert (
        len(pex_packages) == 1
    ), f"Expected exactly 1 .pex package, but found {len(pex_packages)}"
    pex_package = pex_packages[0]
    # Ensure there is only 1 .pex artifact in the .pex package
    pex_artifacts = [
        artifact
        for artifact in pex_package.artifacts
        if artifact.relpath is not None and artifact.relpath.endswith(".pex")
    ]
    assert (
        len(pex_artifacts) == 1
    ), f"Expected exactly 1 .pex artifact, but found {len(pex_artifacts)}"
    pex_artifact = pex_artifacts[0]
    assert (
        pex_artifact.relpath is not None
    ), "Expected single .pex artifact to have a relpath"
    pex_artifact_path = PurePath(pex_artifact.relpath)

    # Prepare the configuration toml for the Science tool
    binary_name = field_set.binary_name.value or field_set.address.target_name
    assert science.default_url_platform_mapping is not None
    target_platforms = _get_target_platforms(
        field_set.platforms.value, science.default_url_platform_mapping, platform
    )
    interpreter_config = await _get_interpreter_config(direct_deps)
    # TODO: This might be better solved by using the `:target_name` syntax and letting downstream handle it
    files_config = _get_files_config(built_packages)
    # Create a toml configuration from the input targets and the minimum_version,
    # and place that into a Digest for later usage
    generated_config = Config(
        lift=LiftConfig(
            name=binary_name,
            description=field_set.description.value or "",
            platforms=list(target_platforms),
            interpreters=[interpreter_config],
            files=list(files_config),
            commands=[
                Command(exe="#{cpython:python}", args=[f"{{{ pex_artifact_path }}}"])
            ],
        )
    )
    parsed_config: Config | None = None
    lift_path = DEFAULT_LIFT_PATH
    if field_set.lift.value is not None:
        # If the user specified a lift.toml file, then use that instead of the generated one
        parsed_config = await _parse_lift_source(field_set.lift)
        assert field_set.lift.file_path is not None
        lift_path = field_set.lift.file_path
    # TODO: Merge the parsed config with the generated config, rather than replacing it
    config = parsed_config or generated_config
    config_content = toml.dumps(asdict(config)).encode()
    lift_digest = await Get(
        Digest, CreateDigest([FileContent(lift_path, config_content)])
    )
    # Download the Science tool for this platform
    downloaded_tool = await Get(
        DownloadedExternalTool, ExternalToolRequest, science.get_request(platform)
    )
    # Put the dependencies and toml configuration into a digest
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                lift_digest,
                downloaded_tool.digest,
                *(pkg.digest for pkg in non_pex_packages),
                pex_package.digest,
            )
        ),
    )
    # The output files are based on the config.lift.name key and each of the
    # platforms (if specified), otherwise just the config.lift.name for native-only
    output_files = [config.lift.name] + [
        f"{config.lift.name}-{plat}" for plat in config.lift.platforms
    ]
    # For config files whose name starts with `:`, tell science where the
    # backing file lives via `--file NAME=LOCATION`.  The argument list is
    # built directly rather than joining and re-splitting on spaces, which
    # would corrupt paths that contain whitespace.
    file_mappings: list[str] = []
    for file in config.lift.files:
        if file.name.startswith(":"):
            file_mappings.extend(["--file", f"{file.name}={pex_artifact_path}"])
    logger.debug(file_mappings)
    # Run science to generate the scie binaries (depending on the `platforms`
    # setting).  `--use-platform-suffix` is only appended when needed — the
    # previous version inserted an empty-string argv element otherwise.
    argv = [downloaded_tool.exe, "lift", *file_mappings, "build"]
    if config.lift.platforms:
        argv.append("--use-platform-suffix")
    argv.append(lift_path)
    process = Process(
        argv=tuple(argv),
        input_digest=input_digest,
        description="Run science on the input digests",
        output_files=output_files,
        level=LogLevel.DEBUG,
    )
    result = await Get(ProcessResult, Process, process)
    snapshot = await Get(
        Snapshot,
        Digest,
        result.output_digest,
    )
    return BuiltPackage(
        result.output_digest,
        artifacts=tuple(BuiltPackageArtifact(file) for file in snapshot.files),
    )
@rule
async def run_scie_binary(field_set: ScieFieldSet) -> RunRequest:
    """After packaging, the scie-jump plugin will place the executable in a location like this:
    dist/{binary name}

    {binary name} will default to `target_name`, but can be modified on the `scie_binary` target.
    """
    built = await Get(BuiltPackage, PackageFieldSet, field_set)
    assert (
        len(built.artifacts) == 1
    ), "`scie_binary` should only generate one output package"
    sole_artifact = built.artifacts[0]
    assert sole_artifact.relpath is not None
    # The executable runs from inside the sandbox chroot.
    exe_path = os.path.join("{chroot}", sole_artifact.relpath)
    return RunRequest(digest=built.digest, args=(exe_path,))
def rules() -> Iterable[Rule | UnionRule]:
    """Expose this plugin's rules and union registrations to Pants."""
    registered: list[Rule | UnionRule] = list(collect_rules())
    registered.append(UnionRule(PackageFieldSet, ScieFieldSet))
    registered.extend(ScieFieldSet.rules())
    return tuple(registered)
from __future__ import annotations
import logging
from dataclasses import dataclass
import toml
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class Config:
    """Top-level lift manifest: a single `lift` table (see LiftConfig)."""

    lift: LiftConfig

    def __post_init__(self) -> None:
        # toml parsing hands us a plain dict; coerce it into the typed dataclass.
        if isinstance(self.lift, dict):
            # Downgraded from logger.warning: this is the normal parse path, not
            # an anomaly worth warning about on every build.
            logger.debug(f"LIFT: {self.lift}")
            object.__setattr__(self, "lift", LiftConfig(**self.lift))

    @classmethod
    def from_toml(cls, file_path: str) -> "Config":
        """Parse `file_path` as TOML and build a Config from its contents."""
        with open(file_path) as f:
            data = toml.load(f)
        return cls(**data)
@dataclass(frozen=True)
class LiftConfig:
    """
    This configuration is a subset of the configuration that can be found here:
    https://github.com/a-scie/lift/blob/main/science/model.py
    """

    name: str
    description: str
    platforms: list[str]
    interpreters: list[Interpreter]
    files: list[File]
    commands: list[Command]
    bindings: frozenset[Command] = frozenset()

    def __post_init__(self) -> None:
        # toml yields plain dicts; coerce each dict entry into its dataclass while
        # leaving already-typed entries untouched. (Previously every entry was
        # converted with Factory(**entry), which crashed on mixed lists that
        # contained both dicts and dataclass instances.)
        def _coerce(items, factory):
            return [factory(**item) if isinstance(item, dict) else item for item in items]

        if any(isinstance(i, dict) for i in self.interpreters):
            object.__setattr__(self, "interpreters", _coerce(self.interpreters, Interpreter))
        if any(isinstance(f, dict) for f in self.files):
            object.__setattr__(self, "files", _coerce(self.files, File))
        if any(isinstance(c, dict) for c in self.commands):
            object.__setattr__(self, "commands", _coerce(self.commands, Command))
        if any(isinstance(b, dict) for b in self.bindings):
            # NOTE(review): this replaces the frozenset with a list, as the original
            # code did; Command holds unhashable fields (list/dict) so a frozenset
            # cannot be rebuilt here.
            object.__setattr__(self, "bindings", _coerce(self.bindings, Command))
@dataclass(frozen=True)
class Interpreter:
    """An interpreter entry of the lift manifest (science `interpreters` table)."""

    version: str
    id: str = "cpython"
    provider: str = "PythonBuildStandalone"
    release: str = "20230507"  # provider release tag
    lazy: bool = True  # presumably defers fetching the interpreter — see science docs
@dataclass(frozen=True)
class File:
    """A file entry of the lift manifest; only the name is modeled here."""

    name: str
@dataclass(frozen=True)
class Command:
    """A command entry of the lift manifest (used for both `commands` and `bindings`)."""

    exe: str
    args: list[str]
    env: dict[str, str] | None = None
    name: str | None = None
    description: str | None = None
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def get_normal(vertices, triangles):
    ''' calculate normal direction in each vertex
    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
    Returns:
        normal: [nver, 3]
    '''
    pt0 = vertices[triangles[:, 0], :]  # [ntri, 3]
    pt1 = vertices[triangles[:, 1], :]  # [ntri, 3]
    pt2 = vertices[triangles[:, 2], :]  # [ntri, 3]
    tri_normal = np.cross(pt0 - pt1, pt0 - pt2)  # [ntri, 3]. normal of each triangle

    normal = np.zeros_like(vertices)  # [nver, 3]
    # Accumulate each triangle's normal onto its three vertices. np.add.at is the
    # vectorized replacement for the original per-triangle Python loop; unlike
    # fancy-index `+=`, it handles repeated vertex indices correctly.
    for corner in range(3):
        np.add.at(normal, triangles[:, corner], tri_normal)

    # normalize to unit length
    mag = np.sum(normal**2, 1)  # [nver]
    # vertices that belong to no triangle get an arbitrary unit normal (1, 0, 0)
    zero_ind = (mag == 0)
    mag[zero_ind] = 1
    normal[zero_ind, 0] = np.ones((np.sum(zero_ind)))

    normal = normal/np.sqrt(mag[:, np.newaxis])

    return normal
# TODO: test
def add_light_sh(vertices, triangles, colors, sh_coeff):
    '''
    In 3d face, usually assume:
    1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
    2. Lighting can be an arbitrary combination of point sources
    --> can be expressed in terms of spherical harmonics(omit the lighting coefficients)
    I = albedo * (sh(n) x sh_coeff)

    albedo: n x 1
    sh_coeff: 9 x 1
    Y(n) = (1, n_x, n_y, n_z, n_xn_y, n_xn_z, n_yn_z, n_x^2 - n_y^2, 3n_z^2 - 1)': n x 9

    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        colors: [nver, 3] albedo
        sh_coeff: [9, 1] spherical harmonics coefficients
    Returns:
        lit_colors: [nver, 3]
    '''
    assert vertices.shape[0] == colors.shape[0]
    nver = vertices.shape[0]
    # Bug fix: the normals were computed but referenced through an undefined
    # name `n`, and the basis was built as (9, nver), which cannot be dotted
    # with the (9, 1) coefficient vector. Build it as (nver, 9) instead.
    n = get_normal(vertices, triangles)  # [nver, 3]
    sh = np.stack(
        (np.ones(nver), n[:, 0], n[:, 1], n[:, 2],
         n[:, 0] * n[:, 1], n[:, 0] * n[:, 2], n[:, 1] * n[:, 2],
         n[:, 0] ** 2 - n[:, 1] ** 2, 3 * (n[:, 2] ** 2) - 1),
        axis=1)  # [nver, 9]
    ref = sh.dot(sh_coeff)  # [nver, 1] irradiance per vertex
    lit_colors = colors * ref
    return lit_colors
def add_light(vertices, triangles, colors, light_positions = 0, light_intensities = 0):
    ''' Gouraud shading. add point lights.
    In 3d face, usually assume:
    1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
    2. Lighting can be an arbitrary combination of point sources
    3. No specular (unless skin is oil, 23333)

    Ref: https://cs184.eecs.berkeley.edu/lecture/pipeline
    Args:
        vertices: [nver, 3]
        triangles: [ntri, 3]
        light_positions: [nlight, 3]
        light_intensities: [nlight, 3]
    Returns:
        lit_colors: [nver, 3]
    '''
    nver = vertices.shape[0]
    normals = get_normal(vertices, triangles)  # [nver, 3]

    # unit vectors from each light position to each vertex
    to_vertex = vertices[np.newaxis, :, :] - light_positions[:, np.newaxis, :]  # [nlight, nver, 3]
    dist = np.linalg.norm(to_vertex, axis=2)  # [nlight, nver]
    to_vertex = to_vertex / dist[:, :, np.newaxis]

    # cosine term n.l for every (light, vertex) pair
    cos_nl = np.einsum('vd,lvd->lv', normals, to_vertex)  # [nlight, nver]

    # Lambertian (diffuse) contribution, summed over all lights;
    # no ambient or specular terms are modeled here.
    shaded = colors[np.newaxis, :, :] * cos_nl[:, :, np.newaxis] * light_intensities[:, np.newaxis, :]
    lit_colors = shaded.sum(axis=0)  # [nver, 3]

    # clamp into the valid color range
    return np.clip(lit_colors, 0, 1)
## TODO. estimate light(sh coeff)
## -------------------------------- estimate. can not use now.
def fit_light(image, vertices, colors, triangles, vis_ind, lamb = 10, max_iter = 3):
    # NOTE(review): marked "can not use now" upstream. This function references an
    # undefined name `texture` (where the albedo/texture data is expected), so it
    # raises NameError as written. Kept byte-for-byte for reference.
    [h, w, c] = image.shape
    # surface normal
    norm = get_normal(vertices, triangles)
    # NOTE(review): vertices are indexed as (3, nver) here (shape[1], row slicing),
    # while get_normal above expects (nver, 3) — confirm the intended layout.
    nver = vertices.shape[1]

    # vertices --> corresponding image pixel (clamp to image bounds, then round)
    pt2d = vertices[:2, :]

    pt2d[0,:] = np.minimum(np.maximum(pt2d[0,:], 0), w - 1)
    pt2d[1,:] = np.minimum(np.maximum(pt2d[1,:], 0), h - 1)
    pt2d = np.round(pt2d).astype(np.int32) # 2 x nver
    image_pixel = image[pt2d[1,:], pt2d[0,:], :] # nver x 3
    image_pixel = image_pixel.T # 3 x nver

    # vertices --> corresponding mean texture pixel with illumination
    # Spherical Harmonic Basis (real SH up to second order, 9 basis functions)
    harmonic_dim = 9
    nx = norm[0,:];
    ny = norm[1,:];
    nz = norm[2,:];
    harmonic = np.zeros((nver, harmonic_dim))

    pi = np.pi
    harmonic[:,0] = np.sqrt(1/(4*pi)) * np.ones((nver,));
    harmonic[:,1] = np.sqrt(3/(4*pi)) * nx;
    harmonic[:,2] = np.sqrt(3/(4*pi)) * ny;
    harmonic[:,3] = np.sqrt(3/(4*pi)) * nz;
    harmonic[:,4] = 1/2. * np.sqrt(3/(4*pi)) * (2*nz**2 - nx**2 - ny**2);
    harmonic[:,5] = 3 * np.sqrt(5/(12*pi)) * (ny*nz);
    harmonic[:,6] = 3 * np.sqrt(5/(12*pi)) * (nx*nz);
    harmonic[:,7] = 3 * np.sqrt(5/(12*pi)) * (nx*ny);
    harmonic[:,8] = 3/2. * np.sqrt(5/(12*pi)) * (nx*nx - ny*ny);

    '''
    I' = sum(albedo * lj * hj) j = 0:9 (albedo = tex)
    set A = albedo*h (n x 9)
        alpha = lj (9 x 1)
        Y = I (n x 1)
        Y' = A.dot(alpha)

    opt function:
        ||Y - A*alpha|| + lambda*(alpha'*alpha)
    result:
        A'*(Y - A*alpha) + lambda*alpha = 0
        ==>
        (A'*A*alpha - lambda)*alpha = A'*Y
        left: 9 x 9
        right: 9 x 1
    '''
    n_vis_ind = len(vis_ind)
    n = n_vis_ind*c

    Y = np.zeros((n, 1))
    A = np.zeros((n, 9))
    light = np.zeros((3, 1))

    # initial per-channel light gain from a least-squares fit of albedo to image
    for k in range(c):
        Y[k*n_vis_ind:(k+1)*n_vis_ind, :] = image_pixel[k, vis_ind][:, np.newaxis]
        # NOTE(review): `texture` is undefined in this scope (see header note)
        A[k*n_vis_ind:(k+1)*n_vis_ind, :] = texture[k, vis_ind][:, np.newaxis] * harmonic[vis_ind, :]
        Ac = texture[k, vis_ind][:, np.newaxis]
        Yc = image_pixel[k, vis_ind][:, np.newaxis]
        light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))

    # alternate between solving for SH coefficients (alpha) and channel gains (light)
    for i in range(max_iter):

        Yc = Y.copy()
        for k in range(c):
            Yc[k*n_vis_ind:(k+1)*n_vis_ind, :] /= light[k]

        # update alpha (ridge-regularized least squares)
        equation_left = np.dot(A.T, A) + lamb*np.eye(harmonic_dim); # why + ?
        equation_right = np.dot(A.T, Yc)
        alpha = np.dot(np.linalg.inv(equation_left), equation_right)

        # update light
        for k in range(c):
            Ac = A[k*n_vis_ind:(k+1)*n_vis_ind, :].dot(alpha)
            Yc = Y[k*n_vis_ind:(k+1)*n_vis_ind, :]
            light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))

    # reconstruct per-vertex shaded appearance from the fitted lighting
    appearance = np.zeros_like(texture)
    for k in range(c):
        tmp = np.dot(harmonic*texture[k, :][:, np.newaxis], alpha*light[k])
        appearance[k,:] = tmp.T

    appearance = np.minimum(np.maximum(appearance, 0), 1)
    return appearance
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from skimage import io
## TODO
## TODO: c++ version
def read_obj(obj_name):
    ''' read mesh
    '''
    # TODO: unimplemented placeholder — always returns 0.
    return 0
# ------------------------- write
def write_asc(path, vertices):
    '''Dump vertex coordinates to a plain-text .asc file.

    Args:
        vertices: shape = (nver, 3)
    '''
    # append the .asc extension unless the path already carries it
    target = path if path.split('.')[-1] == 'asc' else path + '.asc'
    np.savetxt(target, vertices)
def write_obj_with_colors(obj_name, vertices, triangles, colors):
    ''' Save 3D face model with texture represented by per-vertex colors.
    Args:
        obj_name: str
        vertices: shape = (nver, 3)
        triangles: shape = (ntri, 3)
        colors: shape = (nver, 3)
    '''
    triangles = triangles.copy()  # don't mutate the caller's array
    triangles += 1  # obj face indices are 1-based (meshlab starts with 1)

    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'

    # write obj
    with open(obj_name, 'w') as f:
        # write vertices & colors: "v x y z r g b"
        for i in range(vertices.shape[0]):
            s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
            f.write(s)

        # write faces; vertex order is reversed to flip the winding
        # (removed: unused unpacking of triangles.shape)
        for i in range(triangles.shape[0]):
            s = 'f {} {} {}\n'.format(triangles[i, 2], triangles[i, 1], triangles[i, 0])
            f.write(s)
## TODO: c++ version
def write_obj_with_texture(obj_name, vertices, triangles, texture, uv_coords):
    ''' Save 3D face model with texture represented by texture map.
    Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
    Args:
        obj_name: str
        vertices: shape = (nver, 3)
        triangles: shape = (ntri, 3)
        texture: shape = (256,256,3)
        uv_coords: shape = (nver, 3) max value<=1
    '''
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '_texture.png')

    triangles = triangles.copy()
    triangles += 1  # mesh lab start with 1

    # write obj
    with open(obj_name, 'w') as f:
        # first line: write mtlib(material library)
        s = "mtllib {}\n".format(os.path.abspath(mtl_name))
        f.write(s)

        # write vertices
        for i in range(vertices.shape[0]):
            s = 'v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2])
            f.write(s)

        # write uv coords
        for i in range(uv_coords.shape[0]):
            s = 'vt {} {}\n'.format(uv_coords[i,0], uv_coords[i,1])
            f.write(s)

        f.write("usemtl FaceTexture\n")

        # write f: ver ind/ uv ind (reversed vertex order flips the winding)
        for i in range(triangles.shape[0]):
            s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
            f.write(s)

    # write mtl
    with open(mtl_name, 'w') as f:
        f.write("newmtl FaceTexture\n")
        s = 'map_Kd {}\n'.format(os.path.abspath(texture_name))  # map to image
        f.write(s)

    # write texture as png
    # Bug fix: the bare name `imsave` was never imported (the module imports
    # `from skimage import io`), so this raised NameError at runtime.
    io.imsave(texture_name, texture)
def write_obj_with_colors_texture(obj_name, vertices, triangles, colors, texture, uv_coords):
    ''' Save 3D face model with texture.
    Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
    Args:
        obj_name: str
        vertices: shape = (nver, 3)
        triangles: shape = (ntri, 3)
        colors: shape = (nver, 3)
        texture: shape = (256,256,3)
        uv_coords: shape = (nver, 3) max value<=1
    '''
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '_texture.png')

    triangles = triangles.copy()
    triangles += 1 # mesh lab start with 1

    # write obj
    with open(obj_name, 'w') as f:
        # first line: write mtlib(material library)
        s = "mtllib {}\n".format(os.path.abspath(mtl_name))
        f.write(s)

        # write vertices: position followed by per-vertex rgb color
        for i in range(vertices.shape[0]):
            s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
            f.write(s)

        # write uv coords (v coordinate written as-is, not flipped)
        for i in range(uv_coords.shape[0]):
            s = 'vt {} {}\n'.format(uv_coords[i,0], uv_coords[i,1])
            f.write(s)

        f.write("usemtl FaceTexture\n")

        # write f: ver ind/ uv ind — reversed vertex order flips the winding
        for i in range(triangles.shape[0]):
            s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
            f.write(s)

    # write mtl
    with open(mtl_name, 'w') as f:
        f.write("newmtl FaceTexture\n")
        s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
        f.write(s)

    # write texture as png
    io.imsave(texture_name, texture)
import numpy as np
from .. import mesh
''' TODO: a clear document.
Given: image_points, 3D Model, Camera Matrix(s, R, t2d)
Estimate: shape parameters, expression parameters
Inference:
projected_vertices = s*P*R(mu + shape + exp) + t2d --> image_points
s*P*R*shape + s*P*R(mu + exp) + t2d --> image_poitns
# Define:
X = vertices
x_hat = projected_vertices
x = image_points
A = s*P*R
b = s*P*R(mu + exp) + t2d
==>
x_hat = A*shape + b (2 x n)
A*shape (2 x n)
shape = reshape(shapePC * sp) (3 x n)
shapePC*sp : (3n x 1)
* flatten:
x_hat_flatten = A*shape + b_flatten (2n x 1)
A*shape (2n x 1)
--> A*shapePC (2n x 199) sp: 199 x 1
# Define:
pc_2d = A* reshape(shapePC)
pc_2d_flatten = flatten(pc_2d) (2n x 199)
=====>
x_hat_flatten = pc_2d_flatten * sp + b_flatten ---> x_flatten (2n x 1)
Goals:
(ignore flatten, pc_2d-->pc)
min E = || x_hat - x || + lambda*sum(sp/sigma)^2
= || pc * sp + b - x || + lambda*sum(sp/sigma)^2
Solve:
d(E)/d(sp) = 0
2 * pc' * (pc * sp + b - x) + 2 * lambda * sp / (sigma' * sigma) = 0
Get:
(pc' * pc + lambda / (sigma'* sigma)) * sp = pc' * (x - b)
'''
def estimate_shape(x, shapeMU, shapePC, shapeEV, expression, s, R, t2d, lamb = 3000):
    '''Estimate 3DMM shape coefficients by regularized linear least squares.

    Args:
        x: (2, n). image points (to be fitted)
        shapeMU: (3n, 1)
        shapePC: (3n, n_sp)
        shapeEV: (n_sp, 1)
        expression: (3, n)
        s: scale
        R: (3, 3). rotation matrix
        t2d: (2,). 2d translation
        lamb: regulation coefficient
    Returns:
        shape_para: (n_sp, 1) shape parameters(coefficients)
    '''
    x = x.copy()
    assert(shapeMU.shape[0] == shapePC.shape[0])
    assert(shapeMU.shape[0] == x.shape[1]*3)

    dof = shapePC.shape[1]  # number of shape components (n_sp)

    n = x.shape[1]
    sigma = shapeEV  # per-component std devs; used for Tikhonov weighting below
    t2d = np.array(t2d)
    P = np.array([[1, 0, 0], [0, 1, 0]], dtype = np.float32)  # orthographic projection (drop z)
    A = s*P.dot(R)  # (2, 3) combined scale * projection * rotation

    # --- calc pc: project each shape component into 2d, flattened to (2n, dof)
    pc_3d = np.resize(shapePC.T, [dof, n, 3]) # 199 x n x 3
    pc_3d = np.reshape(pc_3d, [dof*n, 3])
    pc_2d = pc_3d.dot(A.T.copy()) # 199 x n x 2
    pc = np.reshape(pc_2d, [dof, -1]).T # 2n x 199

    # --- calc b: projection of (mean + expression) plus translation
    # shapeMU
    mu_3d = np.resize(shapeMU, [n, 3]).T # 3 x n
    # expression
    exp_3d = expression
    #
    b = A.dot(mu_3d + exp_3d) + np.tile(t2d[:, np.newaxis], [1, n]) # 2 x n
    b = np.reshape(b.T, [-1, 1]) # 2n x 1

    # --- solve the normal equations of min ||pc*sp + b - x|| + lamb*||sp/sigma||^2
    equation_left = np.dot(pc.T, pc) + lamb * np.diagflat(1/sigma**2)
    x = np.reshape(x.T, [-1, 1])
    equation_right = np.dot(pc.T, x - b)

    shape_para = np.dot(np.linalg.inv(equation_left), equation_right)

    return shape_para
def estimate_expression(x, shapeMU, expPC, expEV, shape, s, R, t2d, lamb = 2000):
    '''Estimate 3DMM expression coefficients by regularized linear least squares.

    Mirrors estimate_shape with the roles of shape and expression swapped.
    Args:
        x: (2, n). image points (to be fitted)
        shapeMU: (3n, 1)
        expPC: (3n, n_ep)
        expEV: (n_ep, 1)
        shape: (3, n)
        s: scale
        R: (3, 3). rotation matrix
        t2d: (2,). 2d translation
        lamb: regulation coefficient
    Returns:
        exp_para: (n_ep, 1) expression parameters(coefficients)
    '''
    x = x.copy()
    assert(shapeMU.shape[0] == expPC.shape[0])
    assert(shapeMU.shape[0] == x.shape[1]*3)

    dof = expPC.shape[1]  # number of expression components (n_ep)

    n = x.shape[1]
    sigma = expEV
    t2d = np.array(t2d)
    P = np.array([[1, 0, 0], [0, 1, 0]], dtype = np.float32)  # orthographic projection (drop z)
    A = s*P.dot(R)

    # --- calc pc: project each expression component into 2d, flattened to (2n, dof)
    pc_3d = np.resize(expPC.T, [dof, n, 3])
    pc_3d = np.reshape(pc_3d, [dof*n, 3])
    pc_2d = pc_3d.dot(A.T)
    pc = np.reshape(pc_2d, [dof, -1]).T # 2n x 29

    # --- calc b: projection of (mean + current shape) plus translation
    # shapeMU
    mu_3d = np.resize(shapeMU, [n, 3]).T # 3 x n
    # expression
    shape_3d = shape
    #
    b = A.dot(mu_3d + shape_3d) + np.tile(t2d[:, np.newaxis], [1, n]) # 2 x n
    b = np.reshape(b.T, [-1, 1]) # 2n x 1

    # --- solve the normal equations of min ||pc*ep + b - x|| + lamb*||ep/sigma||^2
    equation_left = np.dot(pc.T, pc) + lamb * np.diagflat(1/sigma**2)
    x = np.reshape(x.T, [-1, 1])
    equation_right = np.dot(pc.T, x - b)

    exp_para = np.dot(np.linalg.inv(equation_left), equation_right)

    return exp_para
# ---------------- fit
def fit_points(x, X_ind, model, n_sp, n_ep, max_iter = 4):
    '''Alternating estimation of pose, expression, and shape from 2d landmarks.

    Args:
        x: (n, 2) image points
        X_ind: (n,) corresponding Model vertex indices
        model: 3DMM
        max_iter: iteration
    Returns:
        sp: (n_sp, 1). shape parameters
        ep: (n_ep, 1). exp parameters
        s, R, t
    '''
    x = x.copy().T

    #-- init
    sp = np.zeros((n_sp, 1), dtype = np.float32)
    ep = np.zeros((n_ep, 1), dtype = np.float32)

    #-------------------- estimate
    # expand vertex indices to the x/y/z row indices of the flattened model
    X_ind_all = np.tile(X_ind[np.newaxis, :], [3, 1])*3
    X_ind_all[1, :] += 1
    X_ind_all[2, :] += 2
    valid_ind = X_ind_all.flatten('F')

    shapeMU = model['shapeMU'][valid_ind, :]
    shapePC = model['shapePC'][valid_ind, :n_sp]
    expPC = model['expPC'][valid_ind, :n_ep]

    for i in range(max_iter):
        X = shapeMU + shapePC.dot(sp) + expPC.dot(ep)
        X = np.reshape(X, [int(len(X)/3), 3]).T

        #----- estimate pose
        P = mesh.transform.estimate_affine_matrix_3d22d(X.T, x.T)
        s, R, t = mesh.transform.P2sRt(P)
        rx, ry, rz = mesh.transform.matrix2angle(R)
        #print('Iter:{}; estimated pose: s {}, rx {}, ry {}, rz {}, t1 {}, t2 {}'.format(i, s, rx, ry, rz, t[0], t[1]))

        #----- estimate shape
        # expression
        shape = shapePC.dot(sp)
        shape = np.reshape(shape, [int(len(shape)/3), 3]).T
        ep = estimate_expression(x, shapeMU, expPC, model['expEV'][:n_ep,:], shape, s, R, t[:2], lamb = 20)

        # shape
        expression = expPC.dot(ep)
        expression = np.reshape(expression, [int(len(expression)/3), 3]).T
        # NOTE(review): shape parameters are only (re-)estimated on the first
        # iteration; later iterations refine pose and expression only. Presumably
        # intentional, but worth confirming.
        if i == 0 :
            sp = estimate_shape(x, shapeMU, shapePC, model['shapeEV'][:n_sp,:], expression, s, R, t[:2], lamb = 40)

    return sp, ep, s, R, t
# ---------------- fitting process
def fit_points_for_show(x, X_ind, model, n_sp, n_ep, max_iter = 4):
    '''Like fit_points, but records every intermediate estimate for visualization.

    Args:
        x: (n, 2) image points
        X_ind: (n,) corresponding Model vertex indices
        model: 3DMM
        max_iter: iteration
    Returns:
        sp: (n_sp, 1). shape parameters
        ep: (n_ep, 1). exp parameters
        s, R, t
    '''
    x = x.copy().T

    #-- init
    sp = np.zeros((n_sp, 1), dtype = np.float32)
    ep = np.zeros((n_ep, 1), dtype = np.float32)

    #-------------------- estimate
    # expand vertex indices to the x/y/z row indices of the flattened model
    X_ind_all = np.tile(X_ind[np.newaxis, :], [3, 1])*3
    X_ind_all[1, :] += 1
    X_ind_all[2, :] += 2
    valid_ind = X_ind_all.flatten('F')

    shapeMU = model['shapeMU'][valid_ind, :]
    shapePC = model['shapePC'][valid_ind, :n_sp]
    expPC = model['expPC'][valid_ind, :n_ep]

    # initial pose guess
    s = 4e-04
    R = mesh.transform.angle2matrix([0, 0, 0])
    t = [0, 0, 0]
    # history lists: a snapshot is appended after every sub-step below
    lsp = []; lep = []; ls = []; lR = []; lt = []
    for i in range(max_iter):
        X = shapeMU + shapePC.dot(sp) + expPC.dot(ep)
        X = np.reshape(X, [int(len(X)/3), 3]).T
        lsp.append(sp); lep.append(ep); ls.append(s), lR.append(R), lt.append(t)

        #----- estimate pose
        P = mesh.transform.estimate_affine_matrix_3d22d(X.T, x.T)
        s, R, t = mesh.transform.P2sRt(P)
        lsp.append(sp); lep.append(ep); ls.append(s), lR.append(R), lt.append(t)

        #----- estimate shape
        # expression
        shape = shapePC.dot(sp)
        shape = np.reshape(shape, [int(len(shape)/3), 3]).T
        ep = estimate_expression(x, shapeMU, expPC, model['expEV'][:n_ep,:], shape, s, R, t[:2], lamb = 20)
        lsp.append(sp); lep.append(ep); ls.append(s), lR.append(R), lt.append(t)

        # shape
        expression = expPC.dot(ep)
        expression = np.reshape(expression, [int(len(expression)/3), 3]).T
        sp = estimate_shape(x, shapeMU, shapePC, model['shapeEV'][:n_sp,:], expression, s, R, t[:2], lamb = 40)

    # print('ls', ls)
    # print('lR', lR)
    return np.array(lsp), np.array(lep), np.array(ls), np.array(lR), np.array(lt)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.io as sio
from .. import mesh
from . import fit
from . import load
class MorphabelModel(object):
    """3D Morphable Model (3DMM) wrapper around a BFM-style model file.

    model: nver: number of vertices. ntri: number of triangles. *: must have. ~: can generate ones array for place holder.
            'shapeMU': [3*nver, 1]. *
            'shapePC': [3*nver, n_shape_para]. *
            'shapeEV': [n_shape_para, 1]. ~
            'expMU': [3*nver, 1]. ~
            'expPC': [3*nver, n_exp_para]. ~
            'expEV': [n_exp_para, 1]. ~
            'texMU': [3*nver, 1]. ~
            'texPC': [3*nver, n_tex_para]. ~
            'texEV': [n_tex_para, 1]. ~
            'tri': [ntri, 3] (start from 1, should sub 1 in python and c++). *
            'tri_mouth': [114, 3] (start from 1, as a supplement to mouth triangles). ~
            'kpt_ind': [68,] (start from 1). ~
    """

    def __init__(self, model_path, model_type='BFM'):
        super(MorphabelModel, self).__init__()
        if model_type == 'BFM':
            self.model = load.load_BFM(model_path)
        else:
            # NOTE(review): hard-exits the process on unknown model types
            # (kept for backward compatibility); raising ValueError would be kinder.
            print('sorry, not support other 3DMM model now')
            exit()

        # fixed attributes
        self.nver = self.model['shapePC'].shape[0] // 3  # bug fix: "/" stored a float vertex count
        self.ntri = self.model['tri'].shape[0]
        self.n_shape_para = self.model['shapePC'].shape[1]
        self.n_exp_para = self.model['expPC'].shape[1]
        # NOTE(review): texMU is documented as [3*nver, 1], so this reads 1;
        # texPC.shape[1] looks like the intended source — confirm before changing.
        self.n_tex_para = self.model['texMU'].shape[1]

        self.kpt_ind = self.model['kpt_ind']
        self.triangles = self.model['tri']
        self.full_triangles = np.vstack((self.model['tri'], self.model['tri_mouth']))

    # ------------------------------------- shape: represented with mesh(vertices & triangles(fixed))
    def get_shape_para(self, type='random'):
        """Return shape coefficients (n_shape_para, 1): zeros, or uniform random scaled by 1e4."""
        if type == 'zero':
            sp = np.zeros((self.n_shape_para, 1))  # bug fix: was np.random.zeros (no such function)
        elif type == 'random':
            sp = np.random.rand(self.n_shape_para, 1) * 1e04
        return sp

    def get_exp_para(self, type='random'):
        """Return expression coefficients (n_exp_para, 1): zeros, or uniform in [-1.5, 1.5)."""
        if type == 'zero':
            ep = np.zeros((self.n_exp_para, 1))
        elif type == 'random':
            ep = -1.5 + 3 * np.random.random([self.n_exp_para, 1])
            ep[6:, 0] = 0  # only the first 6 expression components are randomized

        return ep

    def generate_vertices(self, shape_para, exp_para):
        '''
        Args:
            shape_para: (n_shape_para, 1)
            exp_para: (n_exp_para, 1)
        Returns:
            vertices: (nver, 3)
        '''
        vertices = self.model['shapeMU'] + self.model['shapePC'].dot(shape_para) + self.model['expPC'].dot(exp_para)
        # the flattened model interleaves (x, y, z) per vertex; unflatten to (nver, 3)
        vertices = np.reshape(vertices, [int(3), int(len(vertices) / 3)], 'F').T

        return vertices

    # -------------------------------------- texture: here represented with rgb value(colors) in vertices.
    def get_tex_para(self, type='random'):
        """Return texture coefficients (n_tex_para, 1): zeros, or uniform in [0, 1)."""
        if type == 'zero':
            tp = np.zeros((self.n_tex_para, 1))
        elif type == 'random':
            tp = np.random.rand(self.n_tex_para, 1)
        return tp

    def generate_colors(self, tex_para):
        '''
        Args:
            tex_para: (n_tex_para, 1)
        Returns:
            colors: (nver, 3)
        '''
        colors = self.model['texMU'] + self.model['texPC'].dot(tex_para * self.model['texEV'])
        # unflatten to (nver, 3) and rescale from 0-255 to 0-1
        colors = np.reshape(colors, [int(3), int(len(colors) / 3)], 'F').T / 255.

        return colors

    # ------------------------------------------- transformation
    # -------------  transform
    def rotate(self, vertices, angles):
        ''' rotate face
        Args:
            vertices: [nver, 3]
            angles: [3] x, y, z rotation angle(degree)
            x: pitch. positive for looking down
            y: yaw. positive for looking left
            z: roll. positive for tilting head right
        Returns:
            vertices: rotated vertices
        '''
        return mesh.transform.rotate(vertices, angles)

    def transform(self, vertices, s, angles, t3d):
        """Similarity transform: scale, rotate by euler angles (degrees), then translate."""
        R = mesh.transform.angle2matrix(angles)
        return mesh.transform.similarity_transform(vertices, s, R, t3d)

    def transform_3ddfa(self, vertices, s, angles, t3d):  # only used for processing 300W_LP data
        """Same as transform, but uses the 3DDFA angle convention."""
        R = mesh.transform.angle2matrix_3ddfa(angles)
        return mesh.transform.similarity_transform(vertices, s, R, t3d)

    # --------------------------------------------------- fitting
    def fit(self, x, X_ind, max_iter=4, isShow=False):
        ''' fit 3dmm & pose parameters
        Args:
            x: (n, 2) image points
            X_ind: (n,) corresponding Model vertex indices
            max_iter: iteration
            isShow: whether to reserve middle results for show
        Returns:
            fitted_sp: (n_sp, 1). shape parameters
            fitted_ep: (n_ep, 1). exp parameters
            s, angles, t
        '''
        if isShow:
            fitted_sp, fitted_ep, s, R, t = fit.fit_points_for_show(x, X_ind, self.model, n_sp=self.n_shape_para, n_ep=self.n_exp_para, max_iter=max_iter)
            # one rotation matrix per recorded step; convert each to euler angles
            angles = np.zeros((R.shape[0], 3))
            for i in range(R.shape[0]):
                angles[i] = mesh.transform.matrix2angle(R[i])
        else:
            fitted_sp, fitted_ep, s, R, t = fit.fit_points(x, X_ind, self.model, n_sp=self.n_shape_para, n_ep=self.n_exp_para, max_iter=max_iter)
            angles = mesh.transform.matrix2angle(R)
        return fitted_sp, fitted_ep, s, angles, t
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.io as sio
### --------------------------------- load BFM data
def load_BFM(model_path):
    ''' load BFM 3DMM model
    Args:
        model_path: path to BFM model.
    Returns:
        model: (nver = 53215, ntri = 105840). nver: number of vertices. ntri: number of triangles.
            'shapeMU': [3*nver, 1]
            'shapePC': [3*nver, 199]
            'shapeEV': [199, 1]
            'expMU': [3*nver, 1]
            'expPC': [3*nver, 29]
            'expEV': [29, 1]
            'texMU': [3*nver, 1]
            'texPC': [3*nver, 199]
            'texEV': [199, 1]
            'tri': [ntri, 3] (start from 1, should sub 1 in python and c++)
            'tri_mouth': [114, 3] (start from 1, as a supplement to mouth triangles)
            'kpt_ind': [68,] (start from 1)
    PS:
        You can change codes according to your own saved data.
        Just make sure the model has corresponding attributes.
    '''
    raw = sio.loadmat(model_path)
    model = raw['model'][0, 0]

    # Cast to float32: float64 matrix products are needlessly slow in python,
    # especially the big shape/expression PC multiplications.
    model['shapeMU'] = (model['shapeMU'] + model['expMU']).astype(np.float32)
    for key in ('shapePC', 'shapeEV', 'expEV', 'expPC'):
        model[key] = model[key].astype(np.float32)

    # matlab is 1-based: transpose, make C-contiguous int32, shift to 0-based.
    for key in ('tri', 'tri_mouth'):
        model[key] = model[key].T.copy(order='C').astype(np.int32) - 1

    # kpt ind
    model['kpt_ind'] = (np.squeeze(model['kpt_ind']) - 1).astype(np.int32)

    return model
def load_BFM_info(path = 'BFM_info.mat'):
    ''' load 3DMM model extra information
    Args:
        path: path to BFM info.
    Returns:
        model_info:
            'symlist': 2 x 26720
            'symlist_tri': 2 x 52937
            'segbin': 4 x n (0: nose, 1: eye, 2: mouth, 3: cheek)
            'segbin_tri': 4 x ntri
            'face_contour': 1 x 28
            'face_contour_line': 1 x 512
            'face_contour_front': 1 x 28
            'face_contour_front_line': 1 x 512
            'nose_hole': 1 x 142
            'nose_hole_right': 1 x 71
            'nose_hole_left': 1 x 71
            'parallel': 17 x 1 cell
            'parallel_face_contour': 28 x 1 cell
            'uv_coords': n x 2
    '''
    # the .mat stores a 1x1 struct array; [0, 0] unwraps it to the record itself
    return sio.loadmat(path)['model_info'][0, 0]
def load_uv_coords(path = 'BFM_UV.mat'):
    ''' load uv coords of BFM
    Args:
        path: path to data.
    Returns:
        uv_coords: [nver, 2]. range: 0-1
    '''
    # copy into a fresh C-contiguous array so callers can mutate it freely
    return sio.loadmat(path)['UV'].copy(order = 'C')
def load_pncc_code(path = 'pncc_code.mat'):
    ''' load pncc code of BFM
    PNCC code: Defined in 'Face Alignment Across Large Poses: A 3D Solution Xiangyu'
    download at http://www.cbsr.ia.ac.cn/users/xiangyuzhu/projects/3DDFA/main.htm.
    Args:
        path: path to data.
    Returns:
        pncc_code: [nver, 3]
    '''
    # stored transposed in the .mat; flip back to [nver, 3]
    return sio.loadmat(path)['vertex_code'].T
##
def get_organ_ind(model_info):
    ''' get nose, eye, mouth index
    '''
    valid_bin = model_info['segbin'].astype(bool)
    # union of every segment row except the last (cheek): nose + eye + mouth
    organ_mask = valid_bin[:-1, :].any(axis=0)
    return np.nonzero(organ_mask)[0].astype(np.int32)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
from skimage import io
from time import time
from .cython import mesh_core_cython
## TODO
## TODO: c++ version
def read_obj(obj_name):
    ''' read mesh
    '''
    # TODO: unimplemented placeholder — always returns 0.
    return 0
# ------------------------- write
def write_asc(path, vertices):
    '''Dump vertex coordinates to a plain-text .asc file.

    Args:
        vertices: shape = (nver, 3)
    '''
    # append the .asc extension unless the path already carries it
    target = path if path.split('.')[-1] == 'asc' else path + '.asc'
    np.savetxt(target, vertices)
def write_obj_with_colors(obj_name, vertices, triangles, colors):
    ''' Save 3D face model with texture represented by per-vertex colors.
    Args:
        obj_name: str
        vertices: shape = (nver, 3)
        triangles: shape = (ntri, 3)
        colors: shape = (nver, 3)
    '''
    triangles = triangles.copy()  # don't mutate the caller's array
    triangles += 1  # obj face indices are 1-based (meshlab starts with 1)

    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'

    # write obj
    with open(obj_name, 'w') as f:
        # write vertices & colors: "v x y z r g b"
        for i in range(vertices.shape[0]):
            s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
            f.write(s)

        # write faces; vertex order is reversed to flip the winding
        # (removed: unused unpacking of triangles.shape)
        for i in range(triangles.shape[0]):
            s = 'f {} {} {}\n'.format(triangles[i, 2], triangles[i, 1], triangles[i, 0])
            f.write(s)
## TODO: c++ version
def write_obj_with_texture(obj_name, vertices, triangles, texture, uv_coords):
    ''' Save 3D face model with texture represented by texture map.
    Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
    Args:
        obj_name: str
        vertices: shape = (nver, 3)
        triangles: shape = (ntri, 3)
        texture: shape = (256,256,3)
        uv_coords: shape = (nver, 3) max value<=1
    '''
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '_texture.png')

    triangles = triangles.copy()
    triangles += 1  # mesh lab start with 1

    # write obj
    with open(obj_name, 'w') as f:
        # first line: write mtlib(material library)
        s = "mtllib {}\n".format(os.path.abspath(mtl_name))
        f.write(s)

        # write vertices
        for i in range(vertices.shape[0]):
            s = 'v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2])
            f.write(s)

        # write uv coords (v axis flipped: obj origin is bottom-left, image is top-left)
        for i in range(uv_coords.shape[0]):
            s = 'vt {} {}\n'.format(uv_coords[i,0], 1 - uv_coords[i,1])
            f.write(s)

        f.write("usemtl FaceTexture\n")

        # write f: ver ind/ uv ind (reversed vertex order flips the winding)
        for i in range(triangles.shape[0]):
            s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
            f.write(s)

    # write mtl
    with open(mtl_name, 'w') as f:
        f.write("newmtl FaceTexture\n")
        s = 'map_Kd {}\n'.format(os.path.abspath(texture_name))  # map to image
        f.write(s)

    # write texture as png
    # Bug fix: the bare name `imsave` was never imported (the module imports
    # `from skimage import io`), so this raised NameError at runtime.
    io.imsave(texture_name, texture)
# c++ version
def write_obj_with_colors_texture(obj_name, vertices, triangles, colors, texture, uv_coords):
    ''' Save 3D face model with texture.
    Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
    Args:
        obj_name: str
        vertices: shape = (nver, 3)
        triangles: shape = (ntri, 3)
        colors: shape = (nver, 3)
        texture: shape = (256,256,3)
        uv_coords: shape = (nver, 3) max value<=1
    '''
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '_texture.png')
    triangles = triangles.copy()
    triangles += 1 # mesh lab start with 1
    # write obj
    # The compiled core expects contiguous float32 arrays, so copy-convert first.
    vertices, colors, uv_coords = vertices.astype(np.float32).copy(), colors.astype(np.float32).copy(), uv_coords.astype(np.float32).copy()
    # Heavy per-vertex/per-face writing is delegated to the C++/Cython core.
    mesh_core_cython.write_obj_with_colors_texture_core(str.encode(obj_name), str.encode(os.path.abspath(mtl_name)), vertices, triangles, colors, uv_coords, vertices.shape[0], triangles.shape[0], uv_coords.shape[0])
    # write mtl
    with open(mtl_name, 'w') as f:
        f.write("newmtl FaceTexture\n")
        s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
        f.write(s)
    # write texture as png
    io.imsave(texture_name, texture) | /robotpipe-0.0.4-cp39-cp39-win_amd64.whl/heimarobot/insightface/thirdparty/face3d/mesh/io.py | 0.558086 | 0.212968 | io.py | pypi
import cv2
import numpy as np
from skimage import transform as trans
# Canonical 5-point landmark template (two eyes, nose tip, two mouth corners)
# in 112x112 ArcFace crop coordinates; estimate_norm() scales/shifts this.
arcface_dst = np.array(
    [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
    [41.5493, 92.3655], [70.7299, 92.2041]],
    dtype=np.float32)
def estimate_norm(lmk, image_size=112, mode='arcface'):
    """Solve the similarity transform that maps the 5 detected landmarks onto
    the canonical ArcFace template scaled for image_size.

    Returns the top two rows (2x3) of the transform's 3x3 matrix.
    """
    assert lmk.shape == (5, 2)
    assert image_size % 112 == 0 or image_size % 128 == 0
    # Template is defined for a 112px crop; 128-based sizes use the 128
    # scale plus an 8px (scaled) horizontal shift.
    if image_size % 112 == 0:
        ratio, diff_x = float(image_size) / 112.0, 0
    else:
        ratio = float(image_size) / 128.0
        diff_x = 8.0 * ratio
    dst = arcface_dst * ratio
    dst[:, 0] += diff_x
    tform = trans.SimilarityTransform()
    tform.estimate(lmk, dst)
    return tform.params[0:2, :]
def norm_crop(img, landmark, image_size=112, mode='arcface'):
    """Warp img so the face given by landmark lands on the ArcFace template."""
    return cv2.warpAffine(img, estimate_norm(landmark, image_size, mode),
                          (image_size, image_size), borderValue=0.0)
def norm_crop2(img, landmark, image_size=112, mode='arcface'):
    """Like norm_crop, but also return the 2x3 matrix that was applied."""
    M = estimate_norm(landmark, image_size, mode)
    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
    return warped, M
def square_crop(im, S):
    """Resize im so its longer side equals S, paste into an SxS black canvas
    anchored at the top-left, and return (canvas, scale)."""
    h, w = im.shape[0], im.shape[1]
    if h > w:
        height = S
        width = int(float(w) / h * S)
        scale = float(S) / h
    else:
        width = S
        height = int(float(h) / w * S)
        scale = float(S) / w
    resized = cv2.resize(im, (width, height))
    canvas = np.zeros((S, S, 3), dtype=np.uint8)
    canvas[:resized.shape[0], :resized.shape[1], :] = resized
    return canvas, scale
def transform(data, center, output_size, scale, rotation):
    """Scale and rotate data about center, re-centered into an
    output_size x output_size crop. Returns (cropped, M), where M is the
    2x3 affine actually applied."""
    rot_rad = float(rotation) * np.pi / 180.0
    # Compose: scale -> move scaled center to origin -> rotate -> move to crop center.
    t = (trans.SimilarityTransform(scale=scale)
         + trans.SimilarityTransform(translation=(-1 * (center[0] * scale), -1 * (center[1] * scale)))
         + trans.SimilarityTransform(rotation=rot_rad)
         + trans.SimilarityTransform(translation=(output_size / 2, output_size / 2)))
    M = t.params[0:2]
    cropped = cv2.warpAffine(data,
                             M, (output_size, output_size),
                             borderValue=0.0)
    return cropped, M
def trans_points2d(pts, M):
    """Apply a 2x3 affine matrix M to an (n, 2) point array.

    Args:
        pts: (n, 2) array-like of x/y coordinates.
        M: (2, 3) affine matrix.

    Returns:
        (n, 2) float32 array of transformed points.
    """
    M = np.asarray(M)
    # Cast first so per-point numerics match the original float32 loop,
    # then transform the whole batch with one homogeneous matmul.
    pts32 = np.asarray(pts, dtype=np.float32)
    ones = np.ones((pts32.shape[0], 1), dtype=np.float32)
    homo = np.hstack((pts32, ones))
    return homo.dot(M.T).astype(np.float32)
def trans_points3d(pts, M):
    """Apply a 2x3 affine matrix M to (n, 3) points: x/y are transformed,
    z is multiplied by the transform's isotropic scale factor.

    Args:
        pts: (n, 3) array-like.
        M: (2, 3) affine matrix.

    Returns:
        (n, 3) float32 array.
    """
    M = np.asarray(M)
    # Isotropic scale encoded in the first row of a similarity transform.
    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
    pts = np.asarray(pts)
    # Vectorized replacement for the original per-point loop.
    xy32 = pts[:, 0:2].astype(np.float32)
    ones = np.ones((xy32.shape[0], 1), dtype=np.float32)
    homo = np.hstack((xy32, ones))
    new_pts = np.empty(pts.shape, dtype=np.float32)
    new_pts[:, 0:2] = homo.dot(M.T)
    new_pts[:, 2] = pts[:, 2] * scale
    return new_pts
def trans_points(pts, M):
    # Dispatch on point width: (n, 2) -> planar transform, otherwise treat
    # as (n, 3) and also rescale the z column.
    if pts.shape[1] == 2:
        return trans_points2d(pts, M)
    else:
        return trans_points3d(pts, M) | /robotpipe-0.0.4-cp39-cp39-win_amd64.whl/heimarobot/insightface/utils/face_align.py | 0.403332 | 0.532547 | face_align.py | pypi
import os
import hashlib
import requests
from tqdm import tqdm
def check_sha1(filename, sha1_hash):
    """Check whether the sha1 hash of the file content matches the expected hash.

    Only the common-length prefix is compared, so a truncated expected
    hash still matches.

    Parameters
    ----------
    filename : str
        Path to the file.
    sha1_hash : str
        Expected sha1 hash in hexadecimal digits (may be truncated).

    Returns
    -------
    bool
        Whether the file content matches the expected hash.
    """
    sha1 = hashlib.sha1()
    # Stream in 1 MiB chunks so large files never need to fit in memory;
    # the context manager guarantees the handle is closed.
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(1048576), b''):
            sha1.update(chunk)
    sha1_file = sha1.hexdigest()
    # Fix: reuse the digest computed above instead of recomputing it.
    l = min(len(sha1_file), len(sha1_hash))
    return sha1_file[0:l] == sha1_hash[0:l]
def download_file(url, path=None, overwrite=False, sha1_hash=None):
    """Download an given URL
    Parameters
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    Returns
    -------
    str
        The file path of the downloaded file.
    """
    if path is None:
        fname = url.split('/')[-1]
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[-1])
        else:
            fname = path
    # Download only when forced, missing, or the existing file fails the hash.
    if overwrite or not os.path.exists(fname) or (
            sha1_hash and not check_sha1(fname, sha1_hash)):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        print('Downloading %s from %s...' % (fname, url))
        r = requests.get(url, stream=True)
        if r.status_code != 200:
            raise RuntimeError("Failed downloading url %s" % url)
        total_length = r.headers.get('content-length')
        with open(fname, 'wb') as f:
            if total_length is None: # no content length header
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
            else:
                # Known size: stream with a tqdm progress bar (1 KB chunks).
                total_length = int(total_length)
                for chunk in tqdm(r.iter_content(chunk_size=1024),
                                  total=int(total_length / 1024. + 0.5),
                                  unit='KB',
                                  unit_scale=False,
                                  dynamic_ncols=True):
                    f.write(chunk)
        # Verify the freshly downloaded content when a hash was supplied.
        if sha1_hash and not check_sha1(fname, sha1_hash):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(fname))
    return fname | /robotpipe-0.0.4-cp39-cp39-win_amd64.whl/heimarobot/insightface/utils/download.py | 0.688468 | 0.308946 | download.py | pypi
import cv2
import math
import numpy as np
from skimage import transform as trans
def transform(data, center, output_size, scale, rotation):
    """Scale/rotate data about center into an output_size square crop.
    Returns (cropped, M) with M the 2x3 affine that was applied."""
    rot_rad = float(rotation) * np.pi / 180.0
    # scale -> shift scaled center to origin -> rotate -> shift to crop center
    t = (trans.SimilarityTransform(scale=scale)
         + trans.SimilarityTransform(translation=(-1 * (center[0] * scale), -1 * (center[1] * scale)))
         + trans.SimilarityTransform(rotation=rot_rad)
         + trans.SimilarityTransform(translation=(output_size / 2, output_size / 2)))
    M = t.params[0:2]
    cropped = cv2.warpAffine(data,
                             M, (output_size, output_size),
                             borderValue=0.0)
    return cropped, M
def trans_points2d(pts, M):
    """Apply a 2x3 affine matrix M to an (n, 2) point array.

    Args:
        pts: (n, 2) array-like of x/y coordinates.
        M: (2, 3) affine matrix.

    Returns:
        (n, 2) float32 array of transformed points.
    """
    M = np.asarray(M)
    # Cast first so per-point numerics match the original float32 loop,
    # then transform the whole batch with one homogeneous matmul.
    pts32 = np.asarray(pts, dtype=np.float32)
    ones = np.ones((pts32.shape[0], 1), dtype=np.float32)
    homo = np.hstack((pts32, ones))
    return homo.dot(M.T).astype(np.float32)
def trans_points3d(pts, M):
    """Apply a 2x3 affine matrix M to (n, 3) points: x/y are transformed,
    z is multiplied by the transform's isotropic scale factor.

    Returns:
        (n, 3) float32 array.
    """
    M = np.asarray(M)
    # Isotropic scale encoded in the first row of a similarity transform.
    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
    pts = np.asarray(pts)
    # Vectorized replacement for the original per-point loop.
    xy32 = pts[:, 0:2].astype(np.float32)
    ones = np.ones((xy32.shape[0], 1), dtype=np.float32)
    homo = np.hstack((xy32, ones))
    new_pts = np.empty(pts.shape, dtype=np.float32)
    new_pts[:, 0:2] = homo.dot(M.T)
    new_pts[:, 2] = pts[:, 2] * scale
    return new_pts
def trans_points(pts, M):
    """Route (n, 2) arrays to the 2-D transform, everything else to 3-D."""
    is_planar = pts.shape[1] == 2
    return trans_points2d(pts, M) if is_planar else trans_points3d(pts, M)
def estimate_affine_matrix_3d23d(X, Y):
    ''' Using least-squares solution
    Args:
        X: [n, 3]. 3d points(fixed)
        Y: [n, 3]. corresponding 3d points(moving). Y = PX
    Returns:
        P_Affine: (3, 4). Affine camera matrix (the third row is [0, 0, 0, 1]).
    '''
    X_homo = np.hstack((X, np.ones([X.shape[0], 1])))  # n x 4
    # rcond=None selects numpy's modern default cutoff and silences the
    # FutureWarning emitted by the legacy implicit rcond=-1 behaviour.
    P = np.linalg.lstsq(X_homo, Y, rcond=None)[0].T  # Affine matrix. 3 x 4
    return P
def P2sRt(P):
    ''' decompositing camera matrix P
    Args:
        P: (3, 4). Affine Camera Matrix.
    Returns:
        s: scale factor.
        R: (3, 3). rotation matrix.
        t: (3,). translation.
    '''
    t = P[:, 3]
    row1 = P[0:1, :3]
    row2 = P[1:2, :3]
    n1 = np.linalg.norm(row1)
    n2 = np.linalg.norm(row2)
    # Scale is the mean length of the first two rows.
    s = (n1 + n2) / 2.0
    r1 = row1 / n1
    r2 = row2 / n2
    # The third basis vector completes the right-handed orthonormal frame.
    R = np.concatenate((r1, r2, np.cross(r1, r2)), 0)
    return s, R, t
def matrix2angle(R):
    ''' get three Euler angles from Rotation Matrix
    Args:
        R: (3,3). rotation matrix
    Returns:
        x: pitch (degrees)
        y: yaw (degrees)
        z: roll (degrees)
    '''
    sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
    # Gimbal-lock guard: sy ~ 0 means cos(yaw) ~ 0, so roll is not
    # recoverable and is pinned to 0 in the singular branch.
    singular = sy < 1e-6
    if not singular :
        x = math.atan2(R[2,1] , R[2,2])
        y = math.atan2(-R[2,0], sy)
        z = math.atan2(R[1,0], R[0,0])
    else :
        x = math.atan2(-R[1,2], R[1,1])
        y = math.atan2(-R[2,0], sy)
        z = 0
    # rx, ry, rz = np.rad2deg(x), np.rad2deg(y), np.rad2deg(z)
    rx, ry, rz = x*180/np.pi, y*180/np.pi, z*180/np.pi
    return rx, ry, rz | /robotpipe-0.0.4-cp39-cp39-win_amd64.whl/heimarobot/insightface/utils/transform.py | 0.442637 | 0.533884 | transform.py | pypi
import os
import os.path as osp
import errno
def get_model_dir(name, root='~/.insightface'):
    """Return the on-disk directory for a named model under root."""
    return osp.join(os.path.expanduser(root), 'models', name)
def makedirs(path):
    """Create directory recursively if not exists.
    Similar to `makedir -p`, you can skip checking existence before this function.
    Parameters
    ----------
    path : str
        Path of the desired dir
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Ignore "already exists"; re-raise anything else (e.g. permissions).
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
def try_import(package, message=None):
    """Try import specified package, with custom message support.
    Parameters
    ----------
    package : str
        The name of the targeting package.
    message : str, default is None
        If not None, raise ImportError with this customized message when the
        import fails; otherwise re-raise the original error.
    Returns
    -------
    module if found, raise ImportError otherwise
    """
    try:
        return __import__(package)
    except ImportError as e:
        if message:
            raise ImportError(message)
        raise e
def try_import_cv2():
    """Try import cv2 at runtime.
    Returns
    -------
    cv2 module if found. Raise ImportError otherwise
    """
    # Delegates to try_import with an install-hint error message.
    msg = "cv2 is required, you can install by package manager, e.g. 'apt-get', \
        or `pip install opencv-python --user` (note that this is unofficial PYPI package)."
    return try_import('cv2', msg)
def try_import_mmcv():
    """Try import mmcv at runtime.
    Returns
    -------
    mmcv module if found. Raise ImportError otherwise
    """
    # Delegates to try_import with an install-hint error message.
    msg = "mmcv is required, you can install by first `pip install Cython --user` \
        and then `pip install mmcv --user` (note that this is unofficial PYPI package)."
    return try_import('mmcv', msg)
def try_import_rarfile():
    """Try import rarfile at runtime.
    Returns
    -------
    rarfile module if found. Raise ImportError otherwise
    """
    # Delegates to try_import with an install-hint error message.
    msg = "rarfile is required, you can install by first `sudo apt-get install unrar` \
        and then `pip install rarfile --user` (note that this is unofficial PYPI package)."
    return try_import('rarfile', msg)
def import_try_install(package, extern_url=None):
    """Try import the specified package.
    If the package not installed, try use pip to install and import if success.
    Parameters
    ----------
    package : str
        The name of the package trying to import.
    extern_url : str or None, optional
        The external url if package is not hosted on PyPI.
        For example, you can install a package using:
        "pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx".
        In this case, you can pass the url to the extern_url.
    Returns
    -------
    <class 'Module'>
        The imported python module.
    """
    try:
        return __import__(package)
    except ImportError:
        # pip >= 10 moved main() into pip._internal, hence the fallback.
        try:
            from pip import main as pipmain
        except ImportError:
            from pip._internal import main as pipmain
        # trying to install package
        url = package if extern_url is None else extern_url
        pipmain(['install', '--user',
                 url]) # will raise SystemExit Error if fails
        # trying to load again
        try:
            return __import__(package)
        except ImportError:
            import sys
            import site
            # The --user install location may not be on sys.path yet in this
            # process; add it and retry once more.
            user_site = site.getusersitepackages()
            if user_site not in sys.path:
                sys.path.append(user_site)
            return __import__(package)
    # NOTE(review): this line is unreachable — every branch above either
    # returns or raises. Kept for byte-compatibility; candidate for removal.
    return __import__(package)
def try_import_dali():
    """Try import NVIDIA DALI at runtime.
    """
    try:
        dali = __import__('nvidia.dali', fromlist=['pipeline', 'ops', 'types'])
        dali.Pipeline = dali.pipeline.Pipeline
    except ImportError:
        # Stub that defers the failure until a Pipeline is actually built.
        class dali:
            class Pipeline:
                def __init__(self):
                    raise NotImplementedError(
                        "DALI not found, please check if you installed it correctly."
                    )
    return dali | /robotpipe-0.0.4-cp39-cp39-win_amd64.whl/heimarobot/insightface/utils/filesystem.py | 0.606964 | 0.164819 | filesystem.py | pypi
from __future__ import division
import numpy as np
import cv2
import onnx
import onnxruntime
from ..utils import face_align
__all__ = [
'ArcFaceONNX',
]
class ArcFaceONNX:
    """ArcFace face-recognition model backed by an ONNX Runtime session.

    Loads the model graph once to infer its preprocessing constants, then
    produces embedding vectors from aligned face crops.
    """
    def __init__(self, model_file=None, session=None):
        assert model_file is not None
        self.model_file = model_file
        self.session = session
        self.taskname = 'recognition'
        find_sub = False
        find_mul = False
        model = onnx.load(self.model_file)
        graph = model.graph
        # Inspect the first few graph nodes: a leading Sub+Mul pair means the
        # model normalizes its own input (mxnet-exported arcface models).
        for nid, node in enumerate(graph.node[:8]):
            #print(nid, node.name)
            if node.name.startswith('Sub') or node.name.startswith('_minus'):
                find_sub = True
            if node.name.startswith('Mul') or node.name.startswith('_mul'):
                find_mul = True
        if find_sub and find_mul:
            #mxnet arcface model
            input_mean = 0.0
            input_std = 1.0
        else:
            input_mean = 127.5
            input_std = 127.5
        self.input_mean = input_mean
        self.input_std = input_std
        #print('input mean and std:', self.input_mean, self.input_std)
        if self.session is None:
            self.session = onnxruntime.InferenceSession(self.model_file, None)
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        input_name = input_cfg.name
        # NCHW shape -> (width, height) input size for cv2.
        self.input_size = tuple(input_shape[2:4][::-1])
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.input_name = input_name
        self.output_names = output_names
        assert len(self.output_names)==1
        self.output_shape = outputs[0].shape
    def prepare(self, ctx_id, **kwargs):
        # Negative ctx_id selects CPU-only inference.
        if ctx_id<0:
            self.session.set_providers(['CPUExecutionProvider'])
    def get(self, img, face):
        # Align the detected face to the ArcFace template, embed it, and
        # cache the embedding on the face object.
        aimg = face_align.norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
        face.embedding = self.get_feat(aimg).flatten()
        return face.embedding
    def compute_sim(self, feat1, feat2):
        # Cosine similarity between two embedding vectors.
        from numpy.linalg import norm
        feat1 = feat1.ravel()
        feat2 = feat2.ravel()
        sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
        return sim
    def get_feat(self, imgs):
        # Accept a single image or a list; batch-normalize into an NCHW blob.
        if not isinstance(imgs, list):
            imgs = [imgs]
        input_size = self.input_size
        blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out
    def forward(self, batch_data):
        # Run an already-batched NCHW array through the session.
        blob = (batch_data - self.input_mean) / self.input_std
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out | /robotpipe-0.0.4-cp39-cp39-win_amd64.whl/heimarobot/insightface/model_zoo/arcface_onnx.py | 0.460046 | 0.15241 | arcface_onnx.py | pypi
import datetime
from robotpt_common_utils import math_tools
# Days per calendar week; used by the week arithmetic below.
NUM_DAYS_IN_A_WEEK = 7
def subtract_days(start_date, end_date):
    """Return the whole number of days from start_date to end_date.

    Both arguments must be the same concrete type: either both
    datetime.date or both datetime.datetime (mixing them raises).
    """
    both_dates = type(start_date) == type(end_date) == datetime.date
    both_datetimes = type(start_date) == type(end_date) == datetime.datetime
    if not both_dates and not both_datetimes:
        raise TypeError("Inputs must be datetimes")
    return (end_date - start_date).days
def subtract_weeks(start_date, end_date, is_full_weeks_only=True):
    """Return the number of weeks between two dates.

    Truncated to an int when is_full_weeks_only, otherwise a fractional
    float.
    """
    weeks = subtract_days(start_date, end_date) / NUM_DAYS_IN_A_WEEK
    return int(weeks) if is_full_weeks_only else weeks
def get_date_range(
        start_date,
        end_date,
        increment_days=1,
        output_format=None
):
    """Return dates from start_date (inclusive) to end_date (exclusive),
    stepping by increment_days.

    Args:
        start_date: first date of the range.
        end_date: exclusive upper bound; must not precede start_date.
        increment_days: step between consecutive dates, in days.
        output_format: None to return date objects, or a strftime format
            string to return formatted strings.

    Raises:
        ValueError: increment_days is not a nonnegative int, or is 0 while
            the range is non-empty (which would loop forever).
        IOError: start_date > end_date (kept for backward compatibility).
        TypeError: output_format is neither None nor a str.
    """
    if not math_tools.is_int(increment_days):
        raise ValueError("'increment_days' must be an int")
    if increment_days < 0:
        raise ValueError("'increment_days' must be nonnegative")
    if increment_days == 0 and start_date < end_date:
        # Fix: previously this combination spun in an infinite loop.
        raise ValueError("'increment_days' must be positive for a non-empty range")
    if start_date > end_date:
        raise IOError("'start_date' must be less than 'end_date'")
    if output_format is not None and not isinstance(output_format, str):
        # Fix: previously this case fell through and silently returned None.
        raise TypeError("'output_format' must be None or a strftime format string")

    date_range = []
    date = start_date
    while date < end_date:
        date_range.append(date)
        date += datetime.timedelta(days=increment_days)

    if output_format is None:
        return date_range
    return [
        datetime.datetime.strftime(date, output_format)
        for date in date_range
    ]
def subtract_times(t1, t2):
    """Return t2 - t1 as a timedelta for two datetime.time values.

    Times are first mapped to signed offsets around midnight (see
    _time_from_0), so the result wraps toward the "nearest" direction
    rather than always being positive.
    """
    if type(t1) is not datetime.time or type(t2) is not datetime.time:
        raise ValueError
    dt1 = _time_from_0(t1)
    dt2 = _time_from_0(t2)
    days = dt2.days - dt1.days
    seconds = dt2.seconds - dt1.seconds
    # Special-case exact 24h and 12h gaps, then fold >12h same-day gaps back
    # toward zero so the shorter wrap-around is returned.
    # NOTE(review): the hard-coded 12h/24h branches look hand-tuned; verify
    # edge cases at exactly noon/midnight against the intended contract.
    if abs(seconds) == 24*60*60:
        seconds = 0
    elif abs(seconds) == 12*60*60:
        seconds = 12*60*60
        days = -1
    elif seconds > 12*60*60 and days == 0:
        seconds = -(seconds - 12*60*60)
        days = 0
    return datetime.timedelta(days=days, seconds=seconds)
def _time_from_0(t):
    # Map a time-of-day to a timedelta relative to today's midnight, folding
    # values of 12h or more into the negative range — i.e. a signed offset
    # roughly in (-12h, +12h].
    t_dt = datetime.datetime.now().replace(hour=t.hour, minute=t.minute, second=t.second, microsecond=t.microsecond)
    midnight = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    dt = t_dt - midnight
    if dt.seconds == 24*60*60:
        dt = datetime.timedelta()
    if dt.seconds >= 12*60*60:
        dt = datetime.timedelta(seconds=dt.seconds - 24*60*60)
    return dt | /robotpt-common-utils-0.0.8.tar.gz/robotpt-common-utils-0.0.8/robotpt_common_utils/dates.py | 0.568775 | 0.244543 | dates.py | pypi
import wpilib
from adis16470 import ADIS16470_IMU, ADIS16470CalibrationTime
# SmartDashboard chooser labels for selecting which IMU axis supplies yaw.
KYAW_DEFAULT = "Z-Axis"
KYAW_X_AXIS = "X-Axis"
KYAW_Y_AXIS = "Y-Axis"
class MyRobot(wpilib.TimedRobot):
    """Example robot that publishes ADIS16470 IMU readings to SmartDashboard
    and reacts to dashboard toggles for calibration, reset, and yaw-axis
    selection.
    """
    def robotInit(self):
        # Hardware and dashboard setup; runs once at robot start.
        self.timer = wpilib.Timer()
        self.m_imu = ADIS16470_IMU()
        self.m_yawSelected = KYAW_DEFAULT
        # Dashboard-driven command flags, consumed in robotPeriodic().
        self.m_runCal = False
        self.m_configCal = False
        self.m_reset = False
        self.m_setYawAxis = False
        self.m_yawActiveAxis = ADIS16470_IMU.IMUAxis.kZ
        self.m_yawChooser = wpilib.SendableChooser()
        self.m_yawChooser.setDefaultOption(KYAW_DEFAULT, KYAW_DEFAULT)
        self.m_yawChooser.addOption(KYAW_X_AXIS, KYAW_X_AXIS)
        self.m_yawChooser.addOption(KYAW_Y_AXIS, KYAW_Y_AXIS)
        wpilib.SmartDashboard.putData("IMUYawAxis", self.m_yawChooser)
        wpilib.SmartDashboard.putBoolean("RunCal", False)
        wpilib.SmartDashboard.putBoolean("ConfigCal", False)
        wpilib.SmartDashboard.putBoolean("Reset", False)
        wpilib.SmartDashboard.putBoolean("SetYawAxis", False)
    def robotPeriodic(self):
        """
        This function is called every robot packet, no matter the mode. Use
        this for items like diagnostics that you want ran during disabled,
        autonomous, teleoperated and test.
        This runs after the mode specific periodic functions, but before
        LiveWindow and SmartDashboard integrated updating.
        """
        # Publish current IMU readings.
        wpilib.SmartDashboard.putNumber("YawAngle", self.m_imu.getAngle())
        wpilib.SmartDashboard.putNumber(
            "XCompAngle", self.m_imu.getXComplementaryAngle()
        )
        wpilib.SmartDashboard.putNumber(
            "YCompAngle", self.m_imu.getYComplementaryAngle()
        )
        # Pull the one-shot command toggles back from the dashboard.
        self.m_runCal = wpilib.SmartDashboard.getBoolean("RunCal", False)
        self.m_configCal = wpilib.SmartDashboard.getBoolean("ConfigCal", False)
        self.m_reset = wpilib.SmartDashboard.getBoolean("Reset", False)
        self.m_setYawAxis = wpilib.SmartDashboard.getBoolean("SetYawAxis", False)
        self.m_yawSelected = self.m_yawChooser.getSelected()
        # Set IMU settings
        # Each toggle is cleared on the dashboard after being handled.
        if self.m_configCal:
            self.m_imu.configCalTime(ADIS16470CalibrationTime._8s)
            wpilib.SmartDashboard.putBoolean("ConfigCal", False)
            self.m_configCal = False
        if self.m_reset:
            self.m_imu.Reset()
            wpilib.SmartDashboard.putBoolean("Reset", False)
            self.m_reset = False
        if self.m_runCal:
            self.m_imu.Calibrate()
            wpilib.SmartDashboard.putBoolean("RunCal", False)
            self.m_runCal = False
        # Read the desired yaw axis from the dashboard
        if self.m_yawSelected == "X-Axis":
            self.m_yawActiveAxis = ADIS16470_IMU.IMUAxis.kX
        elif self.m_yawSelected == "Y-Axis":
            self.m_yawActiveAxis = ADIS16470_IMU.IMUAxis.kY
        else:
            self.m_yawActiveAxis = ADIS16470_IMU.IMUAxis.kZ
        # Set the desired yaw axis from the dashboard
        if self.m_setYawAxis:
            self.m_imu.SetYawAxis(self.m_yawActiveAxis)
            wpilib.SmartDashboard.putBoolean("SetYawAxis", False)
            self.m_setYawAxis = False
# Standard robotpy entry point: hand program control to wpilib.
if __name__ == "__main__":
    wpilib.run(MyRobot) | /robotpy-adi-2021.0.0.tar.gz/robotpy-adi-2021.0.0/examples/adis16470/robot.py | 0.549399 | 0.241814 | robot.py | pypi
import importlib.util
from os.path import join, dirname
from pkg_resources import iter_entry_points
import sys
from typing import Dict, List, Optional, Set
import warnings
def _hacky_entrypoint_loader(module_name):
# load the root parent spec
pkgs = module_name.split(".")
spec = importlib.util.find_spec(pkgs[0])
assert spec is not None and spec.origin is not None
# even namespace packages are installed in the path, so just guess
# ... and maybe it works?
fname = join(dirname(spec.origin), *pkgs[1:]) + ".py"
spec = importlib.util.spec_from_file_location(module_name, fname)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
return module
class PkgCfg:
    """
    Contains information about an installed package that uses robotpy-build
    """
    def __init__(self, entry_point):
        try:
            self.module = entry_point.load()
        except Exception as e:
            try:
                # Fallback: load the module straight from its guessed file
                # path when the normal entry-point import machinery fails.
                self.module = _hacky_entrypoint_loader(entry_point.module_name)
            except Exception:
                # Surface the original load error, not the fallback's.
                raise e
        self.name = entry_point.name
        # could deduce this, but this is probably fine
        self.libinit_import = getattr(self.module, "libinit_import", None)
        self.depends = getattr(self.module, "depends", [])
        self.pypi_package = getattr(self.module, "pypi_package", None)
        self.package_name = getattr(self.module, "package_name", None)
        self.static_lib = getattr(self.module, "static_lib", False)
    def get_include_dirs(self) -> Optional[List[str]]:
        """
        Include directories provided by this module
        """
        fn = getattr(self.module, "get_include_dirs", None)
        if fn:
            return fn()
        return None
    def get_library_dirs(self) -> Optional[List[str]]:
        """
        Directories where libraries reside
        """
        fn = getattr(self.module, "get_library_dirs", None)
        if fn:
            return fn()
        return None
    def get_library_dirs_rel(self) -> Optional[List[str]]:
        """
        Directories where libraries reside, relative to package
        """
        fn = getattr(self.module, "get_library_dirs_rel", None)
        if fn:
            return fn()
        return None
    def get_library_names(self) -> Optional[List[str]]:
        """
        Names of libraries provided (for linking)
        """
        fn = getattr(self.module, "get_library_names", None)
        if fn:
            return fn()
        return None
    def get_extra_objects(self) -> Optional[List[str]]:
        """
        Names of extra objects to link in
        """
        fn = getattr(self.module, "get_extra_objects", None)
        if fn:
            return fn()
        return None
    def get_library_full_names(self) -> Optional[List[str]]:
        """
        Full names of libraries provided (needed for OSX support)
        """
        fn = getattr(self.module, "get_library_full_names", None)
        if fn:
            return fn()
        return None
    def get_type_casters(self, casters: Dict[str, str]) -> None:
        """
        Legacy type caster information
        """
        # Flattens the rich cfg mapping down to type name -> header file.
        t = {}
        r = self.get_type_casters_cfg(t)
        for k, v in t.items():
            if "hdr" in v:
                casters[k] = v["hdr"]
        return r
    def get_type_casters_cfg(self, casters: Dict[str, str]) -> None:
        """
        Type caster headers provided
        key: type name
        value: a dict with keys:
            hdr: header file
            darg: force default arg
        """
        fn = getattr(self.module, "get_type_casters_cfg", None)
        if fn:
            return fn(casters)
        # Older packages only expose the flat legacy API; adapt its output
        # into the newer {"hdr": ...} dict form.
        fn = getattr(self.module, "get_type_casters", None)
        if fn:
            t = {}
            r = fn(t)
            casters.update({k: {"hdr": v} for k, v in t.items()})
            return r
class PkgCfgProvider:
    """
    Retrieves information about robotpy-build packages
    Warning: Not to be confused with 'pkg-config'
    """
    def __init__(self):
        # name -> PkgCfg for every loaded package
        self.pkgs = {}
    def detect_pkgs(self) -> None:
        """
        Detect and load packages under the robotpybuild entry point group.
        Only loads packages that are dependencies.
        """
        deps_names = set().union(*[pkg.depends for pkg in self.pkgs.values()])
        entry_points = list(iter_entry_points(group="robotpybuild", name=None))
        # Only load the dependencies of the package we're building.
        # If we load the [package being built], then the current build will fail.
        # If we load a package that depends on the [package being built],
        # then the [package being built] will be loaded and the current build will fail.
        run_loop = True
        while run_loop:
            run_loop = False
            for ep in entry_points:
                if ep.name in self.pkgs: # Prevents loading the package being built
                    continue
                if ep.name not in deps_names and ep.name != "robotpy-build":
                    continue
                try:
                    pkg = PkgCfg(ep)
                except Exception as e:
                    warnings.warn(f"Error loading entry point {ep.name}: {e}")
                else:
                    self.add_pkg(pkg)
                    deps_names |= set(pkg.depends)
                    # New deps may now be loadable; rescan the entry points.
                    run_loop = True
    def add_pkg(self, pkg: PkgCfg) -> None:
        # Register (or replace) a package by name.
        self.pkgs[pkg.name] = pkg
    def get_pkg(self, name: str) -> PkgCfg:
        try:
            return self.pkgs[name]
        except KeyError:
            raise KeyError("robotpy-build package '%s' not installed" % name)
    def get_all_deps(self, name: str) -> Set[PkgCfg]:
        # Transitively collect the dependency set of `name`; the named
        # package itself is removed from the result before returning.
        deps: Set[PkgCfg] = set()
        def _get(name: str):
            pkg = self.get_pkg(name)
            if pkg in deps:
                return pkg
            deps.add(pkg)
            for dep in pkg.depends:
                _get(dep)
            return pkg
        pkg = _get(name)
        deps.remove(pkg)
        return deps | /robotpy_build-2023.1.2-py3-none-any.whl/robotpy_build/pkgcfg_provider.py | 0.579519 | 0.24271 | pkgcfg_provider.py | pypi
from distutils.util import get_platform as _get_platform
from dataclasses import dataclass, field
from typing import List
import re
import typing
# wpilib platforms at https://github.com/wpilibsuite/native-utils/blob/master/src/main/java/edu/wpi/first/nativeutils/WPINativeUtilsExtension.java
@dataclass
class WPILibMavenPlatform:
    """Describes how WPILib maven artifacts name/link libraries for one platform."""
    # maven artifact architecture string (e.g. "athena", "x86-64")
    arch: str
    os: str = "linux"
    # prefix prepended to library names ("lib" on unix, "" on windows)
    libprefix: str = "lib"
    #: runtime linkage
    libext: str = ".so"
    #: compile time linkage (None here means "same as libext", see __post_init__)
    linkext: str = None
    #: static linkage
    staticext: str = ".a"
    # preprocessor defines enabled on this platform (suffixed " 1" below)
    defines: List[str] = field(default_factory=list)
    def __post_init__(self):
        # linkext defaults to libext
        if self.linkext is None:
            self.linkext = self.libext
        self.defines = [f"{d} 1" for d in self.defines]
# Shared architecture label for 64-bit x86 entries below.
X86_64 = "x86-64"
# key is python platform, value is information about wpilib maven artifacts
_platforms = {
    "linux-athena": WPILibMavenPlatform("athena", defines=["__FRC_ROBORIO__"]),
    "linux-raspbian": WPILibMavenPlatform("arm32", defines=["__RASPBIAN__"]),
    "linux-armv7l": WPILibMavenPlatform("arm32"),
    "linux-x86_64": WPILibMavenPlatform(X86_64),
    "linux-aarch64": WPILibMavenPlatform("arm64"),
    "win32": WPILibMavenPlatform("x86", "windows", "", ".dll", ".lib", ".lib"),
    "win-amd64": WPILibMavenPlatform(X86_64, "windows", "", ".dll", ".lib", ".lib"),
    "macos-universal": WPILibMavenPlatform("universal", "osx", libext=".dylib"),
}
def get_platform_names() -> typing.List[str]:
    """Return the python-platform keys that robotpy-build knows about."""
    return [key for key in _platforms]
def get_platform(name: typing.Optional[str] = None) -> WPILibMavenPlatform:
    """
    Retrieve platform specific information

    :param name: explicit platform key; when None, the host platform is
        auto-detected from distutils plus distro heuristics.
    :raises KeyError: the resolved platform is not in _platforms.
    """
    # TODO: _PYTHON_HOST_PLATFORM is used for cross builds,
    # and is returned directly from get_platform. Might
    # be useful to note for the future.
    if not name:
        pyplatform = _get_platform()
        # Check for 64 bit x86 macOS (version agnostic)
        # - See https://github.com/pypa/setuptools/issues/2520 for universal2
        # related questions? Sorta.
        if (
            re.fullmatch(r"macosx-.*-x86_64", pyplatform)
            or re.fullmatch(r"macosx-.*-arm64", pyplatform)
            or re.fullmatch(r"macosx-.*-universal2", pyplatform)
        ):
            return _platforms["macos-universal"]
        # armv7l could be a roboRIO or a Raspberry Pi; disambiguate via the
        # distro id, best-effort (any failure falls through to plain armv7l).
        if pyplatform == "linux-armv7l":
            try:
                import distro
                distro_id = distro.id()
                if distro_id in ("nilrt", "nilrt-academic"):
                    pyplatform = "linux-athena"
                elif distro_id == "raspbian":
                    pyplatform = "linux-raspbian"
            except Exception:
                pass
        elif pyplatform == "linux-armv6":
            # NOTE(review): distutils typically reports "linux-armv6l";
            # confirm this branch is reachable on the intended targets.
            try:
                import distro
                distro_id = distro.id()
                if distro_id == "raspbian":
                    pyplatform = "linux-raspbian"
            except Exception:
                pass
        name = pyplatform
    try:
        return _platforms[name]
    except KeyError:
        raise KeyError(f"platform {name} is not supported by robotpy-build!")
def get_platform_override_keys(platform: WPILibMavenPlatform):
    # Used in places where overrides exist
    # Keys go from least to most specific: arch-only, os-only, then both.
    return [
        f"arch_{platform.arch}",
        f"os_{platform.os}",
        f"platform_{platform.os}_{platform.arch}",
    ] | /robotpy_build-2023.1.2-py3-none-any.whl/robotpy_build/platforms.py | 0.743913 | 0.187393 | platforms.py | pypi
from os.path import abspath, join, dirname
from typing import Any, Dict, List, Optional
_root = abspath(dirname(__file__))
def get_include_dirs() -> Optional[List[str]]:
    """Header search paths exported to dependents: the vendored pybind11
    headers plus this package's own include directory."""
    return [join(_root, *parts) for parts in (("pybind11", "include"), ("include",))]
def get_library_dirs() -> Optional[List[str]]:
    """robotpy-build itself ships no libraries to link against."""
    return None
def get_type_casters_cfg(casters: Dict[str, Dict[str, Any]]) -> None:
    """Register the pybind11 type-caster headers this package provides.

    Mutates ``casters`` in place, mapping a C++ type name to a cfg dict
    (currently just ``hdr``: the header to include for that caster).
    """
    casters.update(
        {
            # STL support
            "std::vector": {"hdr": "pybind11/stl.h"},
            "std::deque": {"hdr": "pybind11/stl.h"},
            "std::list": {"hdr": "pybind11/stl.h"},
            "std::array": {"hdr": "pybind11/stl.h"},
            "std::valarray": {"hdr": "pybind11/stl.h"},
            "std::set": {"hdr": "pybind11/stl.h"},
            "std::map": {"hdr": "pybind11/stl.h"},
            "std::unordered_map": {"hdr": "pybind11/stl.h"},
            "std::optional": {"hdr": "pybind11/stl.h"},
            "std::nullopt_t": {"hdr": "pybind11/stl.h"},
            "std::variant": {"hdr": "pybind11/stl.h"},
            "std::function": {"hdr": "pybind11/functional.h"},
            "std::complex": {"hdr": "pybind11/complex.h"},
            "std::chrono::duration": {"hdr": "pybind11/chrono.h"},
            "std::chrono::time_point": {"hdr": "pybind11/chrono.h"},
            # Eigen support (requires numpy)
            "Eigen::Block": {"hdr": "pybind11/eigen.h"},
            "Eigen::DiagonalMatrix": {"hdr": "pybind11/eigen.h"},
            "Eigen::Matrix": {"hdr": "pybind11/eigen.h"},
            "Eigen::MatrixXd": {"hdr": "pybind11/eigen.h"},
            "Eigen::MatrixXdR": {"hdr": "pybind11/eigen.h"},
            "Eigen::MatrixXi": {"hdr": "pybind11/eigen.h"},
            "Eigen::MatrixXf": {"hdr": "pybind11/eigen.h"},
            "Eigen::Ref": {"hdr": "pybind11/eigen.h"},
            "Eigen::Matrix4d": {"hdr": "pybind11/eigen.h"},
            "Eigen::RowVectorXf": {"hdr": "pybind11/eigen.h"},
            "Eigen::SparseMatrix": {"hdr": "pybind11/eigen.h"},
            "Eigen::SparseView": {"hdr": "pybind11/eigen.h"},
            "Eigen::Vector": {"hdr": "pybind11/eigen.h"},
            "Eigen::Vector3d": {"hdr": "pybind11/eigen.h"},
            "Eigen::VectorXf": {"hdr": "pybind11/eigen.h"},
            "Eigen::VectorXcf": {"hdr": "pybind11/eigen.h"},
        }
    )
def get_type_casters(casters: Dict[str, str]) -> None:
    # Legacy flat API: type name -> header string (drops extra cfg keys).
    t = {}
    get_type_casters_cfg(t)
    for k, v in t.items():
        if "hdr" in v:
            casters[k] = v["hdr"] | /robotpy_build-2023.1.2-py3-none-any.whl/robotpy_build/pkgcfg.py | 0.732209 | 0.229676 | pkgcfg.py | pypi
import yaml
from .hooks_datacfg import (
ClassData,
EnumData,
HooksDataYaml,
PropData,
FunctionData,
)
from typing import Dict, Optional
class GeneratorData:
    """
    Used by the hooks to retrieve user-specified generation data, and
    report to the user that there is data missing
    """

    # Parsed per-package generation data loaded from YAML
    data: HooksDataYaml

    def __init__(self, data: HooksDataYaml):
        self.data = data

        # report data: each tracker records which names were looked up and
        # whether user data was present for them
        # name -> {"overloads": {signature: bool}, "first": fn, "missing": bool}
        self.functions: Dict[str, Dict] = {}
        # class name -> {"attributes": {}, "enums": {}, "functions": {}, "missing": bool}
        self.classes: Dict[str, Dict] = {}
        # enum name -> False (recorded only when data was missing)
        self.enums: Dict[str, bool] = {}
        # attribute name -> False (recorded only when data was missing)
        self.attributes: Dict[str, bool] = {}

    def get_class_data(self, name: str) -> ClassData:
        """Return user data for class *name*, recording whether it was found."""
        data = self.data.classes.get(name)
        missing = data is None
        if missing:
            data = ClassData()

        # Always (re)register the class so nested lookups have a place to report
        self.classes[name] = {
            "attributes": {},
            "enums": {},
            "functions": {},
            "missing": missing,
        }
        return data

    def get_cls_enum_data(
        self, name: str, cls_key: str, cls_data: ClassData
    ) -> EnumData:
        """Return user data for an enum nested in class *cls_key*."""
        if name is None:
            # TODO
            return EnumData()
        data = cls_data.enums.get(name)
        if data is None:
            # Record the miss under the owning class
            self.classes[cls_key]["enums"][name] = False
            data = EnumData()
        return data

    def get_enum_data(self, name: str) -> EnumData:
        """Return user data for a global enum, recording a miss if absent."""
        data = self.data.enums.get(name)
        if data is None:
            self.enums[name] = False
            data = EnumData()
        return data

    def get_function_data(
        self,
        fn: dict,
        signature: str,
        cls_key: Optional[str] = None,
        cls_data: Optional[ClassData] = None,
        is_private: bool = False,
    ) -> FunctionData:
        """
        Return user data for a function/method overload.

        :param fn: parsed function dict (from the header parser); mutated to
                   set ``x_overloaded``
        :param signature: string uniquely identifying this overload
        :param cls_key: owning class name, when *fn* is a method
        :param cls_data: owning class user data, when *fn* is a method
        :param is_private: private functions are never reported as missing
        """
        name = fn["name"]
        if cls_data and cls_key:
            data = cls_data.methods.get(name)
            report_base = self.classes[cls_key]["functions"]
        else:
            data = self.data.functions.get(name)
            report_base = self.functions

        # One report entry per function name, shared across its overloads
        report_base = report_base.setdefault(name, {"overloads": {}, "first": fn})
        missing = data is None
        report_base["missing"] = missing and not is_private
        if missing:
            data = FunctionData()
        else:
            overload = data.overloads.get(signature)
            missing = overload is None
            if not missing and overload:
                # merge overload information: overload-specific settings
                # override the function-level settings
                data = data.dict(exclude_unset=True)
                del data["overloads"]
                data.update(overload.dict(exclude_unset=True))
                data = FunctionData(**data)
        # True means "has data (or is private)" for this overload
        report_base["overloads"][signature] = is_private or not missing

        # TODO: doesn't belong here
        is_overloaded = len(report_base["overloads"]) > 1
        if is_overloaded:
            # Retroactively mark the first-seen overload as overloaded too
            report_base["first"]["x_overloaded"] = True
        fn["x_overloaded"] = is_overloaded

        return data

    def get_cls_prop_data(
        self, name: str, cls_key: str, cls_data: ClassData
    ) -> PropData:
        """Return user data for an attribute of class *cls_key*."""
        data = cls_data.attributes.get(name)
        if data is None:
            self.classes[cls_key]["attributes"][name] = False
            data = PropData()
        return data

    def get_prop_data(self, name) -> PropData:
        """Return user data for a global attribute, recording a miss if absent."""
        data = self.data.attributes.get(name)
        if data is None:
            self.attributes[name] = False
            data = PropData()
        return data

    def report_missing(self, name: str, reporter: "MissingReporter"):
        """
        Generate a structure that can be copy/pasted into the generation
        data yaml and print it out if there's missing data
        """
        # note: sometimes we have strings from CppHeaderParser that aren't
        # strings, so we need to cast them to str so yaml doesn't complain

        # Global attributes/functions/enums first
        data = self._process_missing(
            self.attributes, self.functions, self.enums, "functions"
        )

        # Then one entry per class with missing members (or a wholly missing class)
        all_cls_data = {}
        for cls_key, cls_data in self.classes.items():
            result = self._process_missing(
                cls_data["attributes"],
                cls_data["functions"],
                cls_data["enums"],
                "methods",
            )
            if result or cls_data["missing"]:
                all_cls_data[str(cls_key)] = result
        if all_cls_data:
            data["classes"] = all_cls_data

        if data:
            reporter.add_report(name, data)

        return data

    def _process_missing(self, attrs, fns, enums, fn_key: str):
        """Build the YAML-ready missing-data dict for one scope.

        :param fn_key: key name for the function section ("functions" at
                       global scope, "methods" inside a class)
        """
        data: Dict[str, Dict[str, Dict]] = {}

        # attributes
        if attrs:
            for y in attrs.keys():
                assert isinstance(y, str)
            data["attributes"] = {str(n): {} for n in attrs.keys()}

        # enums
        if enums:
            data["enums"] = {str(n): {} for n in enums.keys()}

        # functions
        fn_report = {}
        for fn, fndata in fns.items():
            fn = str(fn)

            overloads = fndata["overloads"]
            overloads_count = len(overloads)
            if overloads_count > 1:
                # Overloaded: only "complete" if every overload had data
                has_data = all(overloads.values())
            else:
                has_data = not fndata["missing"]

            if not has_data:
                d = {}
                if fn == "swap":
                    # swap is conventionally ignored in the generated bindings
                    d = {"ignore": True}

                if overloads_count > 1:
                    # Only list the overloads that are actually missing
                    fn_report[fn] = {
                        "overloads": {
                            k: dict(**d) for k, v in overloads.items() if not v
                        }
                    }
                    for k, v in fn_report[fn]["overloads"].items():
                        if "initializer_list" in k:
                            v["ignore"] = True
                else:
                    fn_report[fn] = d
        if fn_report:
            data[fn_key] = fn_report

        return data
class MissingReporter:
    """
    Accumulates "missing generation data" reports (nested dicts produced by
    :meth:`GeneratorData.report_missing`) and renders them as YAML snippets
    that can be pasted into the generation data files.
    """

    def __init__(self):
        # name -> nested dict of missing-data structure
        self.reports = {}

    def _merge(self, src, dst):
        # Recursively merge src into dst; scalar leaves in src overwrite dst
        for k, v in src.items():
            if isinstance(v, dict):
                if k not in dst:
                    dst[k] = v
                else:
                    self._merge(v, dst[k])
            else:
                dst[k] = v

    def add_report(self, name, data):
        """Store *data* under *name*, merging with any existing report."""
        if name in self.reports:
            self._merge(data, self.reports[name])
        else:
            self.reports[name] = data

    def as_yaml(self):
        """Yield ``(name, yaml_text)`` pairs for each accumulated report."""
        for name, report in self.reports.items():
            # Post-process yaml output: strip empty-dict markers and fix up
            # the rendering of empty-string keys.
            # (fix: the empty-string replace was previously duplicated; a
            # second identical .replace() call is a no-op and was removed)
            yield name, (
                yaml.safe_dump(report, sort_keys=False)
                .replace(" {}", "")
                .replace("? ''\n :", '"":')
            )
from commands1 import Command, CommandGroup, ConditionalCommand
from commandbased.cancelcommand import CancelCommand
import inspect
# Flow-control primitives exported for ``import *`` use
__all__ = ["IF", "ELIF", "ELSE", "WHILE", "RETURN", "BREAK", "CommandFlow"]
class ConditionalFlow(ConditionalCommand):
    """ConditionalCommand variant whose condition is a user-supplied callable."""

    def __init__(self, name, onTrue, onFalse, condition):
        ConditionalCommand.__init__(self, name, onTrue, onFalse)
        # Callable evaluated each time the framework asks for the condition
        self._condition_fn = condition

    def _condition(self):
        return self._condition_fn()
class CommandFlow(CommandGroup):
    """
    CommandGroup that supports the flow-control decorators (IF/ELIF/ELSE).

    Pending conditionals are buffered in ``_ifStack`` and only materialized
    into a nested ConditionalFlow chain right before another command is
    added or the group is started/parented.
    """

    def __init__(self, name):
        CommandGroup.__init__(self, name)
        # Root CommandFlow this group ultimately belongs to; nested groups
        # inherit it from the flow that created them (used by RETURN)
        callingFlow = _getCommandFlow()
        self._source = getattr(callingFlow, "_source", self)
        # Pending list of (condition, command-group) pairs; None when no
        # conditional chain is being built
        self._ifStack = None

    def _popIfStack(self):
        """
        We buffer conditionals until the last moment so we don't have trouble with
        Commands being locked when they're added to a CommandGroup.
        """
        if self._ifStack:
            # First entry is the IF; the remaining entries are ELIF/ELSE
            top = self._ifStack.pop(0)
            cmd = None
            # Build the chain back-to-front so each conditional's onFalse is
            # the next conditional (an ELSE entry has condition None and
            # becomes the innermost onFalse directly)
            for x in reversed(self._ifStack):
                if x[0]:
                    cmd = ConditionalFlow("flowcontrolELIF", x[1], cmd, x[0])
                else:
                    cmd = x[1]
            cmd = ConditionalFlow("flowcontrolIF", top[1], cmd, top[0])
            # Clear before re-entering addSequential to avoid recursion
            self._ifStack = None
            self.addSequential(cmd)

    # These _hook methods ensure we always add our buffered conditions

    def addSequential(self, cmd, timeout=None):
        self._popIfStack()
        if timeout is None:
            CommandGroup.addSequential(self, cmd)
        else:
            CommandGroup.addSequential(self, cmd, timeout)

    def addParallel(self, cmd, timeout=None):
        self._popIfStack()
        if timeout is None:
            CommandGroup.addParallel(self, cmd)
        else:
            CommandGroup.addParallel(self, cmd, timeout)

    def start(self):
        self._popIfStack()
        CommandGroup.start(self)

    def setParent(self, parent):
        self._popIfStack()
        CommandGroup.setParent(self, parent)
class CommandFlowWhile(CommandFlow):
    """CommandFlow that restarts itself for as long as its condition holds."""

    def __init__(self, name, condition):
        super().__init__(name)
        self.whileCondition = condition

    def isFinished(self):
        # Not done with this pass yet -> keep running
        if not CommandGroup.isFinished(self):
            return False
        # Pass complete: stop if the loop condition is now false, otherwise
        # restart the whole group for another iteration
        if not self.whileCondition():
            return True
        self.start()
        return False
def _getCommandFlow():
    """
    Walk up the call stack to locate the CommandFlow instance whose method
    (directly or indirectly) invoked the caller.

    :raises ValueError: if no enclosing CommandFlow can be found
    """
    # https://stackoverflow.com/a/14694234
    # NOTE: stack[2] is depth-sensitive -- callers must invoke this directly
    stack = inspect.stack()
    frame = stack[2].frame
    while "self" not in frame.f_locals:
        frame = frame.f_back
        if frame is None:
            raise ValueError("Could not find calling class for %s" % stack[1].function)
    candidate = frame.f_locals["self"]
    if not isinstance(candidate, CommandFlow):
        raise ValueError(
            "%s may not be used outside of a CommandFlow" % stack[1].function
        )
    return candidate
def _buildCommandFlow(func, parent):
    """Wrap *func* in a CommandFlow group named after the function.

    ``parent`` is currently unused but kept for signature compatibility.
    """
    group = CommandFlow(func.__name__)
    func(group)
    return group
def IF(condition):
    """
    Use as a decorator for a function. The function is wrapped in a
    CommandGroup that starts a new buffered conditional chain in the
    enclosing CommandFlow, gated by *condition*. The decorated function must
    accept one positional argument that will be used as its 'self'.
    """

    def flowcontrolIF(func):
        parent = _getCommandFlow()
        group = _buildCommandFlow(func, parent)
        # Flush any conditional chain already in progress, then start fresh
        try:
            parent._popIfStack()
        except AttributeError:
            pass
        parent._ifStack = [(condition, group)]

    return flowcontrolIF
def ELIF(condition):
    """
    Use as a decorator for a function. The function is wrapped in a
    CommandGroup and appended to the pending conditional chain started by a
    previous IF; it runs when *condition* is true and every earlier
    condition in the chain was false.
    """

    def flowcontrolELIF(func):
        parent = _getCommandFlow()
        group = _buildCommandFlow(func, parent)
        try:
            parent._ifStack.append((condition, group))
        except AttributeError:
            # _ifStack is None when no IF chain is in progress
            raise ValueError("Cannot use ELIF without IF")

    return flowcontrolELIF
def ELSE(func):
    """
    Use as a decorator for a function. The function is wrapped in a
    CommandGroup used as the final onFalse of the conditional chain started
    by a previous IF; the whole chain is then flushed into the flow.
    """
    parent = _getCommandFlow()
    group = _buildCommandFlow(func, parent)
    try:
        # condition None marks the unconditional ELSE branch
        parent._ifStack.append((None, group))
    except AttributeError:
        raise ValueError("Cannot use ELSE without IF")
    parent._popIfStack()
def WHILE(condition):
    """
    Use as a decorator for a function. That function will be placed into a
    CommandGroup, which will be added to a ConditionalCommand. It will be
    modified to restart itself automatically.
    """
    # Loops are not supported on the Commands v1 framework, so this raises
    # unconditionally regardless of ``condition``
    raise NotImplementedError("WHILE does not yet work with Commands v1")
def RETURN():
    """
    Calling this function will end the source CommandGroup immediately.
    """
    flow = _getCommandFlow()
    flow.addSequential(CancelCommand(flow._source))
def BREAK(steps=1):
    """
    Calling this function will end the loop that contains it. Pass an integer to
    break out of that number of nested loops.
    """
    # WHILE is not implemented for Commands v1, so there is never an
    # enclosing loop to break out of; ``steps`` is accordingly unused
    raise ValueError("Cannot BREAK outside of a loop")
import os.path
import time
import threading
import cv2
import numpy as np
import logging
# Module-level logger for the image-writer thread
logger = logging.getLogger("cscore.storage")
class ImageWriter:
    """
    Creates a thread that periodically writes images to a specified
    directory. Useful for looking at images after a match has
    completed.

    The default location is ``/media/sda1/camera``. The folder
    ``/media/sda1`` is the default location that USB drives inserted into
    the RoboRIO are mounted at. The USB drive must have a directory in it
    named ``camera``.

    .. note:: It is recommended to only write images when something useful
              (such as targeting) is happening, otherwise you'll end up
              with a lot of images written to disk that you probably aren't
              interested in.

    Intended usage is::

        self.image_writer = ImageWriter()

        ..

        while True:
            img = ..
            if self.logging_enabled:
                self.image_writer.setImage(img)
    """

    def __init__(
        self,
        *,
        location_root="/media/sda1/camera",
        capture_period=0.5,
        image_format="jpg"
    ):
        """
        :param location_root: Directory to write images to. A subdirectory
                              with the current time will be created, and
                              timestamped images will be written to the
                              subdirectory.
        :param capture_period: How often to write images to disk
        :param image_format: File extension of files to write
        """
        self.location_root = os.path.abspath(location_root)
        self.capture_period = capture_period
        self.image_format = image_format
        # Set False permanently when the writer thread exits (e.g. no USB)
        self.active = True

        # Timestamped output directory; resolved lazily by ``location``
        self._location = None
        self.has_image = False
        # (height, width) of the current double buffers
        self.size = None

        # Protects the double buffers and has_image flag
        self.lock = threading.Condition()

        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def setImage(self, img):
        """
        Call this function when you wish to write the image to disk. Not
        every image is written to disk. Makes a copy of the image.

        :param img: A numpy array representing an OpenCV image
        """
        if not self.active:
            return

        # (Re)allocate the double buffers when the image size changes.
        # NOTE(review): this check/allocation happens outside the lock;
        # concurrent setImage callers could race here -- confirm this is
        # only ever called from a single thread
        if (
            self.size is None
            or self.size[0] != img.shape[0]
            or self.size[1] != img.shape[1]
        ):
            h, w = img.shape[:2]
            self.size = (h, w)
            self.out1 = np.empty((h, w, 3), dtype=np.uint8)
            self.out2 = np.empty((h, w, 3), dtype=np.uint8)

        with self.lock:
            # Zero-width border: effectively copies img into the
            # preallocated out1 buffer without a new allocation
            cv2.copyMakeBorder(
                img, 0, 0, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 255), dst=self.out1
            )
            self.has_image = True
            self.lock.notify()

    @property
    def location(self):
        # Resolved lazily so the timestamped directory is only created once
        # the first image is actually ready to be written
        if self._location is None:
            # This assures that we only log when a USB memory stick is plugged in
            if not os.path.exists(self.location_root):
                raise IOError(
                    "Logging disabled, %s does not exist" % self.location_root
                )

            # Can't do this when program starts, time might be wrong. Ideally by now the DS
            # has connected, so the time will be correct
            self._location = self.location_root + "/%s" % time.strftime(
                "%Y-%m-%d %H.%M.%S"
            )
            logger.info("Logging to %s", self._location)
            os.makedirs(self._location, exist_ok=True)
        return self._location

    def _run(self):
        # Background thread: waits for a new image (rate-limited by
        # capture_period), swaps buffers, and writes the image to disk
        last = time.time()
        logger.info("Storage thread started")
        try:
            while True:
                with self.lock:
                    now = time.time()
                    # NOTE(review): when an image arrives before the period
                    # has elapsed, this waits for the *next* notify instead
                    # of timing out -- confirm that stall is intended
                    while (not self.has_image) or (now - last) < self.capture_period:
                        self.lock.wait()
                        now = time.time()

                    # Swap buffers so setImage can fill out1 while we write out2
                    self.out2, self.out1 = self.out1, self.out2
                    self.has_image = False

                fname = "%s/%.2f.%s" % (self.location, now, self.image_format)
                cv2.imwrite(fname, self.out2)
                last = now
        except IOError as e:
            logger.error("Error logging images: %s", e)

        # NOTE(review): logger.warn is deprecated; prefer logger.warning
        logger.warn("Storage thread exited")
        self.active = False
# Upstream WPILib artifact versions to download
hal_version = "2019.4.1"
wpiutil_version = "2019.4.1"

# FRC maven repositories
# NOTE(review): plain http -- confirm whether the mirror requires https now
frc_site = "http://first.wpi.edu/FRC/roborio/maven/release"
frc_site_dev = "http://first.wpi.edu/FRC/roborio/maven/development"

# Artifact base URLs and archive file names derived from the versions above
hal_site = "%s/edu/wpi/first/hal/hal-cpp" % frc_site
wpiutil_site = "%s/edu/wpi/first/wpiutil/wpiutil-cpp" % frc_site
hal_libs = "hal-cpp-%s-linuxathena.zip" % hal_version
hal_headers = "hal-cpp-%s-headers.zip" % hal_version
wpiutil_libs = "wpiutil-cpp-%s-linuxathena.zip" % wpiutil_version
def _download(url):
    """
    Download *url* to a temporary file and return the local file name.

    Progress percentage is printed to stdout; the temporary file is
    removed at interpreter exit.
    """
    import atexit
    import posixpath
    import sys
    from urllib.request import urlcleanup, urlretrieve

    print("Downloading", posixpath.basename(url))

    def _progress(count, blocksize, totalsize):
        pct = int(count * blocksize * 100 / totalsize)
        sys.stdout.write("\r%02d%%" % pct)
        sys.stdout.flush()

    filename, _ = urlretrieve(url, reporthook=_progress)
    atexit.register(urlcleanup)
    return filename
def extract_hal_headers(to=None):
    """
    Download the HAL header archive and extract it to a specified location.

    :param to: either a destination directory string or a dict of {src: dst}
    """
    return download_and_extract_zip(f"{hal_site}/{hal_version}/{hal_headers}", to=to)
def extract_hal_libs(to=None):
    """
    Download the HAL library zipfile and extract it to a specified location.

    :param to: either a destination directory string or a dict of {src: dst}
    """
    return download_and_extract_zip(f"{hal_site}/{hal_version}/{hal_libs}", to=to)
def extract_wpiutil_libs(to=None):
    """
    Download the WPIUtil library zipfile and extract it to a specified location.

    :param to: either a destination directory string or a dict of {src: dst}
    """
    return download_and_extract_zip(
        f"{wpiutil_site}/{wpiutil_version}/{wpiutil_libs}", to=to
    )
def download_and_extract_zip(url, to=None):
    """
    Utility method intended to be useful for downloading/extracting
    third party source zipfiles.

    :param to: destination directory, a dict of {archive member: dest path},
               or None to extract into a temporary directory that is cleaned
               up at interpreter exit
    :returns: the destination directory when extracting a whole tree
    """
    import atexit
    import shutil
    import tempfile
    import zipfile

    if to is None:
        # generate temporary directory, cleaned up at interpreter exit
        tmpdir = tempfile.TemporaryDirectory()
        to = tmpdir.name
        atexit.register(tmpdir.cleanup)

    zip_fname = _download(url)
    with zipfile.ZipFile(zip_fname) as z:
        if isinstance(to, str):
            z.extractall(to)
            return to
        # dict form: copy out only the requested archive members
        for src, dst in to.items():
            with z.open(src, "r") as zfp, open(dst, "wb") as fp:
                shutil.copyfileobj(zfp, fp)
from . import data

# Shared simulation data dictionary (single instance for the whole sim)
hal_data = data.hal_data

from . import functions as fns
def notify_new_ds_data():
    """Called when driver station data is modified"""
    data.hooks.notifyDSData()
def set_autonomous(enabled, game_specific_message=None):
    """
    Switch the simulated robot into autonomous mode.

    Only designed to be called on transition.

    :param enabled: True to enable the robot, False otherwise
    :param game_specific_message: optional game data string from the FMS
    """
    if game_specific_message:
        hal_data["event"]["game_specific_message"] = game_specific_message
    hal_data["control"].update(
        {"autonomous": True, "test": False, "enabled": enabled, "dsAttached": True}
    )
    # NOTE(review): autonomous uses an absolute countdown value while teleop
    # offsets from the FPGA time -- confirm this asymmetry is intentional
    hal_data["time"]["remaining"] = 135000000 if enabled else None
    notify_new_ds_data()
def set_test_mode(enabled):
    """
    Switch the simulated robot into test mode.

    Only designed to be called on transition.
    """
    new_control = {
        "autonomous": False,
        "test": True,
        "enabled": enabled,
        "dsAttached": True,
    }
    hal_data["control"].update(new_control)
    hal_data["time"]["remaining"] = None
    notify_new_ds_data()
def set_teleop_mode(enabled):
    """
    Switch the simulated robot into teleoperated mode.

    Only designed to be called on transition.
    """
    hal_data["control"].update(
        {"autonomous": False, "test": False, "enabled": enabled, "dsAttached": True}
    )
    # 120 seconds of match time remaining, measured from the current FPGA time
    hal_data["time"]["remaining"] = (
        fns.hooks.getFPGATime() + 120000000 if enabled else None
    )
    notify_new_ds_data()
def set_disabled():
    """
    Transition the simulated robot to disabled.

    Only designed to be called on transition.
    """
    hal_data["control"].update(
        dict(autonomous=False, test=False, enabled=False, dsAttached=True)
    )
    hal_data["time"]["remaining"] = None
    notify_new_ds_data()
def set_estop():
    """
    Simulate an emergency stop.

    Only designed to be called on transition.
    """
    hal_data["control"].update(
        dict(
            autonomous=False,
            test=False,
            enabled=False,
            dsAttached=True,
            eStop=True,
        )
    )
    hal_data["time"]["remaining"] = None
    notify_new_ds_data()
def set_mode(new_mode, new_enabled, **kwargs):
    """
    Transition the simulated robot to the requested mode, if needed.

    Can be called repeatedly; a transition function is only invoked when
    the mode or enabled state actually changes.

    :param new_mode: one of "auto", "test", or "teleop"
    :param new_enabled: True if enabled, False otherwise
    """
    assert new_mode in ("auto", "test", "teleop")
    new_enabled = bool(new_enabled)

    # Determine the currently active mode from the control flags
    ctrl = hal_data["control"]
    if ctrl["autonomous"]:
        current_mode = "auto"
    elif ctrl["test"]:
        current_mode = "test"
    else:
        current_mode = "teleop"

    if new_mode == current_mode and ctrl["enabled"] == new_enabled:
        return

    if new_mode == "test":
        set_test_mode(new_enabled, **kwargs)
    elif new_mode == "auto":
        set_autonomous(new_enabled, **kwargs)
    else:
        set_teleop_mode(new_enabled, **kwargs)
import typing
from ._pathfinder import Segment, pathfinder_modify_swerve, pathfinder_modify_tank
# Public API of this module
__all__ = ["SwerveModifier", "TankModifier"]
class SwerveModifier:
    """
    Derives four wheel trajectories -- one per swerve module -- from a single
    source trajectory measured at the centre of the drive base. The source
    trajectory itself is left unmodified.
    """

    def __init__(self, source: typing.List[Segment]) -> None:
        """:param source: The source (center) trajectory"""
        self.source = source
        # Per-wheel trajectories; populated by modify()
        self.fl = self.fr = self.bl = self.br = None

    def modify(
        self, wheelbase_width: float, wheelbase_depth: float
    ) -> "SwerveModifier":
        """Generate the trajectory modification.

        :param wheelbase_width: width (in meters) between the left/right sides
                                of the drivebase
        :param wheelbase_depth: width (in meters) between the front/back sides
                                of the drivebase
        :returns: self
        """
        self.fl, self.fr, self.bl, self.br = pathfinder_modify_swerve(
            self.source, wheelbase_width, wheelbase_depth
        )
        return self

    def getSourceTrajectory(self) -> typing.List[Segment]:
        """Get the initial source trajectory"""
        return self.source

    def getFrontLeftTrajectory(self) -> typing.List[Segment]:
        """Get the trajectory for the front-left wheel of the drive base"""
        return self.fl

    def getFrontRightTrajectory(self) -> typing.List[Segment]:
        """Get the trajectory for the front-right wheel of the drive base"""
        return self.fr

    def getBackLeftTrajectory(self) -> typing.List[Segment]:
        """Get the trajectory for the back-left wheel of the drive base"""
        return self.bl

    def getBackRightTrajectory(self) -> typing.List[Segment]:
        """Get the trajectory for the back-right wheel of the drive base"""
        return self.br
class TankModifier:
    """
    Derives a trajectory for each side of a tank-style drivebase (two
    independently driven parallel sides) from a single source trajectory
    measured at the centre of the drive base. The source trajectory itself
    is left unmodified.
    """

    def __init__(self, source: typing.List[Segment]) -> None:
        """:param source: The source (center) trajectory"""
        self.source = source
        # Per-side trajectories; populated by modify()
        self.left = self.right = None

    def modify(self, wheelbase_width: float) -> "TankModifier":
        """Generate the trajectory modification.

        :param wheelbase_width: width (in meters) between the individual
                                sides of the drivebase
        :returns: self
        """
        self.left, self.right = pathfinder_modify_tank(self.source, wheelbase_width)
        return self

    def getSourceTrajectory(self) -> typing.List[Segment]:
        """Get the initial source trajectory"""
        return self.source

    def getLeftTrajectory(self) -> typing.List[Segment]:
        """Get the trajectory for the left side of the drive base"""
        return self.left

    def getRightTrajectory(self) -> typing.List[Segment]:
        """Get the trajectory for the right side of the drive base"""
        return self.right
import math
import typing
from ._pathfinder import (
DistanceFollower as _DistanceFollower,
EncoderFollower as _EncoderFollower,
EncoderConfig,
FollowerConfig,
Segment,
pathfinder_follow_distance2,
pathfinder_follow_encoder2,
)
__all__ = ["DistanceFollower", "EncoderFollower"]
class DistanceFollower(_DistanceFollower):
    """
    Follows a trajectory using a measured distance covered (in meters) as
    feedback. Usable with both tank and swerve drive implementations.
    """

    def __init__(self, trajectory: typing.List[Segment]):
        super().__init__()
        self.trajectory = trajectory
        self.cfg = FollowerConfig()

    def setTrajectory(self, trajectory: typing.List[Segment]) -> None:
        """Set a new trajectory to follow, and reset the cumulative errors and segment counts"""
        self.trajectory = trajectory
        self.reset()

    def configurePIDVA(
        self, kp: float, ki: float, kd: float, kv: float, ka: float
    ) -> None:
        """Configure the PID/VA Variables for the Follower

        :param kp: The proportional term. This is usually quite high (0.8 - 1.0 are common values)
        :param ki: The integral term. Currently unused.
        :param kd: The derivative term. Adjust this if you are unhappy with the tracking of the follower. 0.0 is the default
        :param kv: The velocity ratio. This should be 1 over your maximum velocity @ 100% throttle.
                   This converts m/s given by the algorithm to a scale of -1..1 to be used by your
                   motor controllers
        :param ka: The acceleration term. Adjust this if you want to reach higher or lower speeds faster. 0.0 is the default
        """
        for attr, value in (("kp", kp), ("ki", ki), ("kd", kd), ("kv", kv), ("ka", ka)):
            setattr(self.cfg, attr, value)

    def reset(self) -> None:
        """Reset the follower to start again. Encoders must be reconfigured."""
        self.last_error = 0
        self.segment = 0

    def calculate(self, distance_covered: float) -> float:
        """Calculate the desired output for the motors, based on the distance the robot has covered.

        This does not account for heading of the robot. To account for heading, add some extra terms in your control
        loop for realignment based on gyroscope input and the desired heading given by this object.

        :param distance_covered: The distance covered in meters
        :returns: The desired output for your motor controller
        """
        count = len(self.trajectory)
        if self.segment < count:
            return pathfinder_follow_distance2(
                self.cfg, self, self.trajectory[self.segment], count, distance_covered
            )
        # Ran off the end of the trajectory: report done, hold the last heading
        self.finished = 1
        self.output = 0
        self.heading = self.trajectory[-1].heading
        return 0.0

    def getHeading(self) -> float:
        """:returns: the desired heading of the current point in the trajectory"""
        return self.heading

    def getSegment(self) -> Segment:
        """:returns: the current segment being operated on"""
        return self.trajectory[self.segment]

    def isFinished(self) -> bool:
        """:returns: whether we have finished tracking this trajectory or not."""
        return self.segment >= len(self.trajectory)
class EncoderFollower(_EncoderFollower):
    """
    Follows a trajectory using raw encoder ticks as feedback. Usable with
    both tank and swerve drive implementations.
    """

    def __init__(self, trajectory: typing.List[Segment]):
        super().__init__()
        self.trajectory = trajectory
        self.cfg = EncoderConfig()

    def setTrajectory(self, trajectory: typing.List[Segment]) -> None:
        """Set a new trajectory to follow, and reset the cumulative errors and segment counts"""
        self.trajectory = trajectory
        self.reset()

    def configurePIDVA(
        self, kp: float, ki: float, kd: float, kv: float, ka: float
    ) -> None:
        """Configure the PID/VA Variables for the Follower

        :param kp: The proportional term. This is usually quite high (0.8 - 1.0 are common values)
        :param ki: The integral term. Currently unused.
        :param kd: The derivative term. Adjust this if you are unhappy with the tracking of the follower. 0.0 is the default
        :param kv: The velocity ratio. This should be 1 over your maximum velocity @ 100% throttle.
                   This converts m/s given by the algorithm to a scale of -1..1 to be used by your
                   motor controllers
        :param ka: The acceleration term. Adjust this if you want to reach higher or lower speeds faster. 0.0 is the default
        """
        for attr, value in (("kp", kp), ("ki", ki), ("kd", kd), ("kv", kv), ("ka", ka)):
            setattr(self.cfg, attr, value)

    def configureEncoder(
        self, initial_position: int, ticks_per_revolution: int, wheel_diameter: float
    ) -> None:
        """Configure the Encoders being used in the follower.

        :param initial_position: The initial 'offset' of your encoder. This should be set to the encoder value just
                                 before you start to track
        :param ticks_per_revolution: How many ticks per revolution the encoder has
        :param wheel_diameter: The diameter of your wheels (or pulleys for track systems) in meters
        """
        self.cfg.initial_position = initial_position
        self.cfg.ticks_per_revolution = ticks_per_revolution
        # distance traveled per wheel revolution
        self.cfg.wheel_circumference = math.pi * wheel_diameter

    def reset(self) -> None:
        """Reset the follower to start again. Encoders must be reconfigured."""
        self.last_error = 0
        self.segment = 0

    def calculate(self, encoder_tick: int) -> float:
        """Calculate the desired output for the motors, based on the amount of ticks the encoder has gone through.

        This does not account for heading of the robot. To account for heading, add some extra terms in your control
        loop for realignment based on gyroscope input and the desired heading given by this object.

        :param encoder_tick: The amount of ticks the encoder has currently measured.
        :returns: The desired output for your motor controller
        """
        count = len(self.trajectory)
        if self.segment < count:
            return pathfinder_follow_encoder2(
                self.cfg, self, self.trajectory[self.segment], count, encoder_tick
            )
        # Ran off the end of the trajectory: report done, hold the last heading
        self.finished = 1
        self.output = 0
        self.heading = self.trajectory[-1].heading
        return 0.0

    def getHeading(self) -> float:
        """:returns: the desired heading of the current point in the trajectory"""
        return self.heading

    def getSegment(self) -> Segment:
        """:returns: the current segment being operated on"""
        return self.trajectory[self.segment]

    def isFinished(self) -> bool:
        """:returns: whether we have finished tracking this trajectory or not."""
        return self.segment >= len(self.trajectory)
import pickle
import socket
from robotpy_toolkit_7407.subsystem import Subsystem
from robotpy_toolkit_7407.network.objects import RobotStatusPacket, NetworkObject
from robotpy_toolkit_7407.utils import logger
class Network:
    """
    Robot <-> driver-station communication layer.

    ### SCHEMA:
    # Robot status:
        - UDP status packet is sent by the robot to python on the DS laptop (port 5800)
        - Pickled string of the RobotStatusPacket struct
        - Python backend on DS is responsible for unpickling, validation, and converting to JSON for the frontend
    # DS to Robot communication
        - Python backend responsible for converting the actions into packets
        - TCP packets on port 5801
        - Pickled string
        - Each distinct command will have a defined struct
        - Response codes??
    """

    # Subsystems whose status is serialized into each status packet
    subsystems: list[Subsystem]

    # UDP endpoint for robot -> driver-station status packets
    udp_ip: str = "127.0.0.1"
    udp_port: int = 5800
    udp_socket: socket.socket
    # Packets larger than this are dropped rather than sent
    udp_max_packet_size: int = 32768

    # Frontend websocket endpoint (unused by the methods defined here)
    local_websocket_ip: str = "localhost"
    local_websocket_port: int = 8765

    @classmethod
    def robot_init(cls, subsystems: list[Subsystem]):
        """Initialize the robot-side (sending) half of the link."""
        logger.info("initializing network on robot", "[network]")
        cls.subsystems = subsystems
        logger.info("initializing udp", "[network]")
        cls.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        logger.info("initialization complete", "[network]")

    @classmethod
    def ds_init(cls):
        """Initialize the driver-station-side (receiving) half of the link."""
        logger.info("initializing network on driver station", "[network]")
        logger.info("initializing udp", "[network]")
        cls.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        cls.udp_socket.bind((cls.udp_ip, cls.udp_port))
        logger.info("initialization complete", "[network]")

    @classmethod
    def robot_send_status(cls):
        """Send one status packet describing all subsystems (robot side)."""
        cls._udp_send_network_obj(cls._robot_get_status())

    @classmethod
    def ds_get_status(cls) -> RobotStatusPacket:
        """Block until a valid RobotStatusPacket arrives (DS side)."""
        while True:
            status = cls._udp_receive_network_obj()
            # Ignore any packet that does not unpickle to a status packet
            if isinstance(status, RobotStatusPacket):
                return status

    @classmethod
    def _robot_get_status(cls) -> RobotStatusPacket:
        # Collect the network representation of every registered subsystem
        subsystem_status = []
        for s in cls.subsystems:
            subsystem_status.append(s.get_network_object())
        return RobotStatusPacket(
            subsystems=subsystem_status
        )

    @classmethod
    def _udp_send_network_obj(cls, obj: NetworkObject):
        data = pickle.dumps(obj)
        # UDP payloads above the limit are dropped (with a warning), not split
        if len(data) > cls.udp_max_packet_size:
            logger.warn(f"Network object {obj} exceeded maximum packet size and was not sent")
            return
        cls.udp_socket.sendto(data, (cls.udp_ip, cls.udp_port))

    @classmethod
    def _udp_receive_network_obj(cls) -> NetworkObject:
        data, _ = cls.udp_socket.recvfrom(cls.udp_max_packet_size)
        # SECURITY: pickle.loads executes arbitrary code embedded in a
        # crafted packet -- anything that can reach this UDP port can
        # exploit it. Consider a safe serialization format (e.g. JSON).
        obj: NetworkObject = pickle.loads(data)
        return obj
from robotpy_toolkit_7407.unum import Unum
unit = Unum.unit
m = M = unit("m", 0, "meter")
Ym = YM = unit("Ym", 10**24 * m, "yottameter")
Zm = ZM = unit("Zm", 10**21 * m, "zettameter")
Em = EM = unit("Em", 10**18 * m, "exameter")
Pm = PM = unit("Pm", 10**15 * m, "petameter")
Tm = TM = unit("Tm", 10**12 * m, "terameter")
Gm = GM = unit("Gm", 10**9 * m, "gigameter")
Mm = MM = unit("Mm", 10**6 * m, "megameter")
km = KM = unit("km", 10**3 * m, "kilometer")
hm = HM = unit("hm", 10**2 * m, "hectometer")
dam = DAM = unit("dam", 10**1 * m, "decameter")
ym = YM = unit("ym", 10**-24 * m, "yoctometer")
zm = ZM = unit("zm", 10**-21 * m, "zeptometer")
am = AM = unit("am", 10**-18 * m, "attometer")
fm = FM = unit("fm", 10**-15 * m, "femtometer")
pm = PM = unit("pm", 10**-12 * m, "picometer")
nm = NM = unit("nm", 10**-9 * m, "nanometer")
um = UM = unit("um", 10**-6 * m, "micrometer")
mm = MM = unit("mm", 10**-3 * m, "millimeter")
cm = CM = unit("cm", 10**-2 * m, "centimeter")
dm = DM = unit("dm", 10**-1 * m, "decimeter")
# SI time: the second and its decimal prefixes. No uppercase aliases here:
# uppercase S is Siemens; seconds can only use lowercase s.
s = unit("s", 0, "second")
Ys = unit("Ys", 10**24 * s, "yottasecond")
Zs = unit("Zs", 10**21 * s, "zettasecond")
Es = unit("Es", 10**18 * s, "exasecond")
Ps = unit("Ps", 10**15 * s, "petasecond")
Ts = unit("Ts", 10**12 * s, "terasecond")
Gs = unit("Gs", 10**9 * s, "gigasecond")
Ms = unit("Ms", 10**6 * s, "megasecond")
ks = unit("ks", 10**3 * s, "kilosecond")
hs = unit("hs", 10**2 * s, "hectosecond")
das = unit("das", 10**1 * s, "decasecond")
ys = unit("ys", 10**-24 * s, "yoctosecond")
zs = unit("zs", 10**-21 * s, "zeptosecond")
#as = unit("as", 10**-18 * s, "attosecond") # as is a reserved word
fs = unit("fs", 10**-15 * s, "femtosecond")
ps = unit("ps", 10**-12 * s, "picosecond")
ns = unit("ns", 10**-9 * s, "nanosecond")
us = unit("us", 10**-6 * s, "microsecond")
ms = unit("ms", 10**-3 * s, "millisecond")
cs = unit("cs", 10**-2 * s, "centisecond")
ds = unit("ds", 10**-1 * s, "decisecond")
# SI electric current: the ampere and its decimal prefixes.
# NOTE(review): "A = A = ..." is a redundant double assignment (the lowercase
# and uppercase spellings coincide); harmless, an artifact of the alias
# pattern. The uppercase aliases YA, ZA, PA and MA are rebound by the
# sub-multiple lines below -- use Y A's mixed-case forms for the large ones.
A = A = unit("A", 0, "ampere")
YA = YA = unit("YA", 10**24 * A, "yottaampere")
ZA = ZA = unit("ZA", 10**21 * A, "zettaampere")
EA = EA = unit("EA", 10**18 * A, "exaampere")
PA = PA = unit("PA", 10**15 * A, "petaampere")
TA = TA = unit("TA", 10**12 * A, "teraampere")
GA = GA = unit("GA", 10**9 * A, "gigaampere")
MA = MA = unit("MA", 10**6 * A, "megaampere")
kA = KA = unit("kA", 10**3 * A, "kiloampere")
hA = HA = unit("hA", 10**2 * A, "hectoampere")
daA = DAA = unit("daA", 10**1 * A, "decaampere")
yA = YA = unit("yA", 10**-24 * A, "yoctoampere")  # rebinds YA (was yottaampere)
zA = ZA = unit("zA", 10**-21 * A, "zeptoampere")  # rebinds ZA (was zettaampere)
aA = AA = unit("aA", 10**-18 * A, "attoampere")
fA = FA = unit("fA", 10**-15 * A, "femtoampere")
pA = PA = unit("pA", 10**-12 * A, "picoampere")  # rebinds PA (was petaampere)
nA = NA = unit("nA", 10**-9 * A, "nanoampere")
uA = UA = unit("uA", 10**-6 * A, "microampere")
mA = MA = unit("mA", 10**-3 * A, "milliampere")  # rebinds MA (was megaampere)
cA = CA = unit("cA", 10**-2 * A, "centiampere")
dA = DA = unit("dA", 10**-1 * A, "deciampere")
# SI thermodynamic temperature: the kelvin and its decimal prefixes.
# NOTE(review): "K = K = ..." is a redundant double assignment; YK, ZK, PK
# and MK are rebound by the sub-multiple lines below (same collision pattern
# as the meter/ampere families).
K = K = unit("K", 0, "kelvin")
YK = YK = unit("YK", 10**24 * K, "yottakelvin")
ZK = ZK = unit("ZK", 10**21 * K, "zettakelvin")
EK = EK = unit("EK", 10**18 * K, "exakelvin")
PK = PK = unit("PK", 10**15 * K, "petakelvin")
TK = TK = unit("TK", 10**12 * K, "terakelvin")
GK = GK = unit("GK", 10**9 * K, "gigakelvin")
MK = MK = unit("MK", 10**6 * K, "megakelvin")
kK = KK = unit("kK", 10**3 * K, "kilokelvin")
hK = HK = unit("hK", 10**2 * K, "hectokelvin")
daK = DAK = unit("daK", 10**1 * K, "decakelvin")
yK = YK = unit("yK", 10**-24 * K, "yoctokelvin")  # rebinds YK (was yottakelvin)
zK = ZK = unit("zK", 10**-21 * K, "zeptokelvin")  # rebinds ZK (was zettakelvin)
aK = AK = unit("aK", 10**-18 * K, "attokelvin")
fK = FK = unit("fK", 10**-15 * K, "femtokelvin")
pK = PK = unit("pK", 10**-12 * K, "picokelvin")  # rebinds PK (was petakelvin)
nK = NK = unit("nK", 10**-9 * K, "nanokelvin")
uK = UK = unit("uK", 10**-6 * K, "microkelvin")
mK = MK = unit("mK", 10**-3 * K, "millikelvin")  # rebinds MK (was megakelvin)
cK = CK = unit("cK", 10**-2 * K, "centikelvin")
dK = DK = unit("dK", 10**-1 * K, "decikelvin")
# SI amount of substance: the mole and its decimal prefixes.
# NOTE(review): YMOL, ZMOL, PMOL and MMOL are rebound by the sub-multiple
# lines below (same alias-collision pattern as the other families).
mol = MOL = unit("mol", 0, "mole")
Ymol = YMOL = unit("Ymol", 10**24 * mol, "yottamole")
Zmol = ZMOL = unit("Zmol", 10**21 * mol, "zettamole")
Emol = EMOL = unit("Emol", 10**18 * mol, "examole")
Pmol = PMOL = unit("Pmol", 10**15 * mol, "petamole")
Tmol = TMOL = unit("Tmol", 10**12 * mol, "teramole")
Gmol = GMOL = unit("Gmol", 10**9 * mol, "gigamole")
Mmol = MMOL = unit("Mmol", 10**6 * mol, "megamole")
kmol = KMOL = unit("kmol", 10**3 * mol, "kilomole")
hmol = HMOL = unit("hmol", 10**2 * mol, "hectomole")
damol = DAMOL = unit("damol", 10**1 * mol, "decamole")
ymol = YMOL = unit("ymol", 10**-24 * mol, "yoctomole")  # rebinds YMOL
zmol = ZMOL = unit("zmol", 10**-21 * mol, "zeptomole")  # rebinds ZMOL
amol = AMOL = unit("amol", 10**-18 * mol, "attomole")
fmol = FMOL = unit("fmol", 10**-15 * mol, "femtomole")
pmol = PMOL = unit("pmol", 10**-12 * mol, "picomole")  # rebinds PMOL
nmol = NMOL = unit("nmol", 10**-9 * mol, "nanomole")
umol = UMOL = unit("umol", 10**-6 * mol, "micromole")
mmol = MMOL = unit("mmol", 10**-3 * mol, "millimole")  # rebinds MMOL
cmol = CMOL = unit("cmol", 10**-2 * mol, "centimole")
dmol = DMOL = unit("dmol", 10**-1 * mol, "decimole")
# SI luminous intensity: the candela and its decimal prefixes.
# NOTE(review): YCD, ZCD, PCD and MCD are rebound by the sub-multiple lines
# below (same alias-collision pattern as the other families).
cd = CD = unit("cd", 0, "candela")
Ycd = YCD = unit("Ycd", 10**24 * cd, "yottacandela")
Zcd = ZCD = unit("Zcd", 10**21 * cd, "zettacandela")
Ecd = ECD = unit("Ecd", 10**18 * cd, "exacandela")
Pcd = PCD = unit("Pcd", 10**15 * cd, "petacandela")
Tcd = TCD = unit("Tcd", 10**12 * cd, "teracandela")
Gcd = GCD = unit("Gcd", 10**9 * cd, "gigacandela")
Mcd = MCD = unit("Mcd", 10**6 * cd, "megacandela")
kcd = KCD = unit("kcd", 10**3 * cd, "kilocandela")
hcd = HCD = unit("hcd", 10**2 * cd, "hectocandela")
dacd = DACD = unit("dacd", 10**1 * cd, "decacandela")
ycd = YCD = unit("ycd", 10**-24 * cd, "yoctocandela")  # rebinds YCD
zcd = ZCD = unit("zcd", 10**-21 * cd, "zeptocandela")  # rebinds ZCD
acd = ACD = unit("acd", 10**-18 * cd, "attocandela")
fcd = FCD = unit("fcd", 10**-15 * cd, "femtocandela")
pcd = PCD = unit("pcd", 10**-12 * cd, "picocandela")  # rebinds PCD
ncd = NCD = unit("ncd", 10**-9 * cd, "nanocandela")
ucd = UCD = unit("ucd", 10**-6 * cd, "microcandela")
mcd = MCD = unit("mcd", 10**-3 * cd, "millicandela")  # rebinds MCD
ccd = CCD = unit("ccd", 10**-2 * cd, "centicandela")
dcd = DCD = unit("dcd", 10**-1 * cd, "decicandela")
# SI mass: the SI base unit is the KILOgram, so every prefix factor below is
# offset by 10**-3 relative to the gram (e.g. milligram = 10**-6 kg), and
# the plain gram is defined last as 10**-3 kg.
# NOTE(review): YG, ZG, PG and MG are rebound by the sub-multiple lines
# below (same alias-collision pattern as the other families).
kg = KG = unit("kg", 0, "kilogram")
Yg = YG = unit("Yg", 10**21 * kg, "yottagram")
Zg = ZG = unit("Zg", 10**18 * kg, "zettagram")
Eg = EG = unit("Eg", 10**15 * kg, "exagram")
Pg = PG = unit("Pg", 10**12 * kg, "petagram")
Tg = TG = unit("Tg", 10**9 * kg, "teragram")
Gg = GG = unit("Gg", 10**6 * kg, "gigagram")
Mg = MG = unit("Mg", 10**3 * kg, "megagram")
hg = HG = unit("hg", 10**-1 * kg, "hectogram")
dag = DAG = unit("dag", 10**-2 * kg, "decagram")
yg = YG = unit("yg", 10**-27 * kg, "yoctogram")  # rebinds YG (was yottagram)
zg = ZG = unit("zg", 10**-24 * kg, "zeptogram")  # rebinds ZG (was zettagram)
ag = AG = unit("ag", 10**-21 * kg, "attogram")
fg = FG = unit("fg", 10**-18 * kg, "femtogram")
pg = PG = unit("pg", 10**-15 * kg, "picogram")  # rebinds PG (was petagram)
ng = NG = unit("ng", 10**-12 * kg, "nanogram")
ug = UG = unit("ug", 10**-9 * kg, "microgram")
mg = MG = unit("mg", 10**-6 * kg, "milligram")  # rebinds MG (was megagram)
cg = CG = unit("cg", 10**-5 * kg, "centigram")
dg = DG = unit("dg", 10**-4 * kg, "decigram")
g = unit("g", 10**-3 * kg, "gram")
# cleaning: keep the module namespace to unit names only
del Unum
del unit
from math import pi
from robotpy_toolkit_7407.unum import Unum
from robotpy_toolkit_7407.unum.units.si import *
unit = Unum.unit
# Non-SI units accepted for use with the SI, derived from the SI bases.
# NOTE(review): the lowercase alias "min" shadows the builtin min() for any
# module doing "from ... import *", and "l" is the ambiguous single-letter
# name flagged by PEP 8 (E741). Renaming would break callers, so they are
# only flagged here.
min = MIN = unit( 'min' , 60 * s , 'minute' )
h = H = unit( 'h' , 60 * MIN , 'hour' )
d = D = unit( 'd' , 24 * H , 'day' )
deg = ARCDEG = unit( 'deg' , pi/180 * RAD , 'degree (angle)' )
arcmin = ARCMIN = unit( "'" , ARCDEG / 60 , 'minute (angle)' )
arcsec = ARCSEC = unit( "''" , ARCMIN / 60 , 'second (angle)' )
l = L = unit( 'L' , 1E-3 * M**3 , 'liter' )
t = TON = unit( 't' , 1E3 * KG , 'metric ton' )
# Np is dimensionless (factor 1); dB is declared as its own base unit
# (second argument 0) because the decibel is logarithmic, not a linear
# multiple of anything.
Np = NP = unit( 'Np' , 1 , 'neper' )
dB = DECIBEL = unit( 'dB' , 0 , 'decibel' )
eV = EV = unit( 'eV' , 1.60218E-19 * J , 'electronvolt' )
u = U = unit( 'u' , 1.66054E-27 * KG , 'unified atomic mass unit' )
ua = AU = UA = unit( 'ua' , 1.49598E11 * M , 'astronomical unit' )
mile = MILE = unit( 'mile' , 1609.34 * M , 'statute mile' )
nmile = NMILE = unit( 'nmi' , 1852 * M , 'nautical mile' )
knot = KNOT = unit( 'knot' , MILE / H , 'knot' )
a = ARE = unit( 'a' , 1E2 * M**2 , 'are' )
ha = HA = unit( 'ha' , 1E4 * M**2 , 'hectare' )
bar = BAR = unit( 'bar' , 1E5 * PA , 'bar' )
angstrom = ANGSTROM = unit( 'angstrom' , 1E-10 * M , 'angstrom' )
b = B = unit( 'b' , 1E-28 * M**2 , 'barn' )
Ci = CI = unit( 'Ci' , 3.7E10 * BQ , 'curie' )
R = R = unit( 'R' , 2.58E-4 * C / KG , 'roentgen' )  # redundant double assign
rem = REM = unit( 'rem' , 1E-2 * SV , 'rem' )
# Note : 'rad' defined as 1E-2 Gy as been left out because it conflits with
# using 'rad' for radians.
# cleaning
del Unum
del unit
del pi
import math
from robotpy_toolkit_7407.unum import Unum
from wpimath.geometry import Rotation2d, Pose2d, Translation2d
from wpimath.kinematics import SwerveDrive4Odometry, SwerveDrive4Kinematics, SwerveModuleState
from robotpy_toolkit_7407.oi.joysticks import JoystickAxis
from robotpy_toolkit_7407.subsystem import Subsystem
from robotpy_toolkit_7407.utils import logger
from robotpy_toolkit_7407.utils.math import rotate_vector, bounded_angle_diff
from robotpy_toolkit_7407.utils.units import s, m, deg, rad, hour, mile, rev
def translation(x: Unum, y: Unum) -> Translation2d:
    """Convert a pair of Unum lengths into a WPILib Translation2d (in meters)."""
    return Translation2d(x.asNumber(m), y.asNumber(m))
class SwerveNode:
    """One swerve module (drive motor + steering), with flip optimization.

    Subclasses implement the four hardware accessors; ``set()`` handles
    choosing between steering to the target angle or steering to the
    opposite angle and reversing the drive motor, whichever is closer.
    """

    # True while the drive output is being negated because the wheel was
    # "flipped" 180 degrees instead of fully rotated.
    _motor_reversed: bool
    # Accumulated pi offsets applied to the steering sensor by flips.
    # NOTE(review): the class-level default is `rad` (i.e. 1 radian), while
    # init() resets it to 0 * rad -- looks like the default was meant to be
    # 0 * rad; only pre-init reads would ever see 1 rad. Confirm.
    _motor_sensor_offset: Unum = rad

    def init(self):
        """Reset flip state; call once before the node is used."""
        self._motor_reversed = False
        self._motor_sensor_offset = 0 * rad

    def set(self, vel: Unum, angle_radians: Unum):
        """Command the node to `vel` at wheel angle `angle_radians`."""
        self._set_angle(angle_radians, self.get_current_motor_angle() + self._motor_sensor_offset)
        self.set_motor_velocity(vel if not self._motor_reversed else -vel)

    # OVERRIDDEN FUNCTIONS -- implemented by hardware-specific subclasses
    def set_motor_angle(self, pos: Unum): ...
    def get_current_motor_angle(self) -> Unum: ...
    def set_motor_velocity(self, vel: Unum): ...
    def get_motor_velocity(self) -> Unum: ...

    # 0 degrees is facing right
    def _set_angle(self, target_angle: Unum, initial_angle: Unum):
        """Steer toward `target_angle`, flipping 180 degrees when shorter."""
        target_sensor_angle, flipped, flip_sensor_offset = SwerveNode._resolve_angles(target_angle, initial_angle)
        target_sensor_angle -= self._motor_sensor_offset
        if flipped:
            self._motor_reversed = not self._motor_reversed
            self._motor_sensor_offset += flip_sensor_offset
        self.set_motor_angle(target_sensor_angle)

    @staticmethod
    def _resolve_angles(target_angle: Unum, initial_angle: Unum) -> tuple[Unum, bool, Unum]:
        """
        :param target_angle: Target node angle
        :param initial_angle: Initial node sensor angle
        :return: (target_sensor_angle, flipped, flip_sensor_offset)
        """
        target_rad = target_angle.asNumber(rad)
        initial_rad = initial_angle.asNumber(rad)

        # Actual angle difference in radians
        diff = bounded_angle_diff(initial_rad, target_rad)

        # Should we flip: rotations beyond 0.65*pi are cheaper done as a
        # half-turn offset plus drive reversal.
        if abs(diff) > 0.65 * math.pi:
            flip_sensor_offset = math.pi if diff > 0 else -math.pi
            diff -= flip_sensor_offset
            return (diff + initial_rad) * rad, True, flip_sensor_offset * rad

        return (diff + initial_rad) * rad, False, 0 * rad
class SwerveGyro:
    """Interface for the drivetrain's heading gyro; subclasses wrap hardware."""

    def init(self): ...
    def get_robot_heading(self) -> Unum: ...
    def reset_angle(self): ...
class SwerveDrivetrain(Subsystem):
    """Four-module swerve drivetrain with field-relative control and odometry.

    Subclasses supply the four SwerveNode instances, the gyro and the
    joystick axes as class attributes; call init() once before use.
    """

    n_00: SwerveNode  # Top Left
    n_01: SwerveNode  # Bottom Left
    n_10: SwerveNode  # Top Right
    n_11: SwerveNode  # Bottom Right
    gyro: SwerveGyro
    axis_dx: JoystickAxis
    axis_dy: JoystickAxis
    axis_rotation: JoystickAxis
    track_width: Unum = 1 * m
    max_vel: Unum = 20 * mile/hour
    max_angular_vel: Unum = 4 * rev/s
    # Commands smaller than both deadzones stop the drive motors entirely.
    deadzone_velocity: Unum = 0.05 * m/s
    deadzone_angular_velocity: Unum = 5 * deg/s
    start_pose: Pose2d = Pose2d(0, 0, 0)

    def __init__(self):
        super().__init__()
        # Created in init(); None until then.
        self.kinematics: SwerveDrive4Kinematics | None = None
        self.odometry: SwerveDrive4Odometry | None = None
        self._omega = 0 * rad/s  # last commanded angular velocity (for simulation)

    def init(self):
        """Initialize all nodes, the gyro, kinematics and odometry."""
        logger.info("initializing swerve drivetrain", "[swerve_drivetrain]")
        self.n_00.init()
        self.n_01.init()
        self.n_10.init()
        self.n_11.init()
        self.gyro.init()
        logger.info("initializing odometry", "[swerve_drivetrain]")
        # Node positions relative to robot center; order must match the
        # module order used in odometry.update() below (n_00..n_11).
        self.kinematics = SwerveDrive4Kinematics(
            translation(-.5 * self.track_width, -.5 * self.track_width),
            translation(-.5 * self.track_width, .5 * self.track_width),
            translation(.5 * self.track_width, -.5 * self.track_width),
            translation(.5 * self.track_width, .5 * self.track_width)
        )
        self.odometry = SwerveDrive4Odometry(
            self.kinematics,
            Rotation2d(self.gyro.get_robot_heading().asNumber(rad)),
            self.start_pose
        )
        logger.info("initialization complete", "[swerve_drivetrain]")

    def set(self, vel: tuple[Unum, Unum], angular_vel: Unum):
        """Drive at (x, y) velocity `vel` with angular velocity `angular_vel`.

        The translation is rotated by the negative robot heading --
        presumably converting a field-relative command into the robot frame;
        confirm against the gyro's sign convention.
        """
        self._omega = angular_vel  # For simulation
        vel = rotate_vector(vel[0], vel[1], -self.gyro.get_robot_heading())

        # Full stop inside the deadzone (keeps the wheels at their last angle).
        if abs(vel[0]) < self.deadzone_velocity and abs(vel[1]) < self.deadzone_velocity and \
                abs(angular_vel) < self.deadzone_angular_velocity:
            self.n_00.set_motor_velocity(0 * m/s)
            self.n_01.set_motor_velocity(0 * m/s)
            self.n_10.set_motor_velocity(0 * m/s)
            self.n_11.set_motor_velocity(0 * m/s)
        else:
            self.n_00.set(*self._calculate_swerve_node(
                -.5 * self.track_width, -.5 * self.track_width,
                vel[0], vel[1], angular_vel
            ))
            self.n_01.set(*self._calculate_swerve_node(
                -.5 * self.track_width, .5 * self.track_width,
                vel[0], vel[1], angular_vel
            ))
            self.n_10.set(*self._calculate_swerve_node(
                .5 * self.track_width, -.5 * self.track_width,
                vel[0], vel[1], angular_vel
            ))
            self.n_11.set(*self._calculate_swerve_node(
                .5 * self.track_width, .5 * self.track_width,
                vel[0], vel[1], angular_vel
            ))

        # Feed measured module states back into odometry, in the same
        # module order as the kinematics constructor above.
        self.odometry.update(
            Rotation2d(self.gyro.get_robot_heading().asNumber(rad)),
            SwerveModuleState(self.n_00.get_motor_velocity().asNumber(m/s), Rotation2d(self.n_00.get_current_motor_angle().asNumber(rad))),
            SwerveModuleState(self.n_01.get_motor_velocity().asNumber(m/s), Rotation2d(self.n_01.get_current_motor_angle().asNumber(rad))),
            SwerveModuleState(self.n_10.get_motor_velocity().asNumber(m/s), Rotation2d(self.n_10.get_current_motor_angle().asNumber(rad))),
            SwerveModuleState(self.n_11.get_motor_velocity().asNumber(m/s), Rotation2d(self.n_11.get_current_motor_angle().asNumber(rad)))
        )

    def stop(self):
        """Zero all four modules (velocity and steering command)."""
        self.n_00.set(0 * m/s, 0 * rad/s)
        self.n_01.set(0 * m/s, 0 * rad/s)
        self.n_10.set(0 * m/s, 0 * rad/s)
        self.n_11.set(0 * m/s, 0 * rad/s)

    @staticmethod
    def _calculate_swerve_node(node_x: Unum, node_y: Unum, dx: Unum, dy: Unum, d_theta: Unum) -> tuple[Unum, Unum]:
        """Combine translation (dx, dy) and rotation d_theta at the wheel
        located at (node_x, node_y); returns (speed, wheel angle)."""
        # Tangent direction of rotation about the robot center, normalized
        # to magnitude 1 m.
        tangent_x, tangent_y = -node_y, node_x
        tangent_m = math.sqrt(tangent_x.asNumber(m)**2 + tangent_y.asNumber(m)**2) * m
        tangent_x /= tangent_m / m
        tangent_y /= tangent_m / m
        # NOTE(review): r = sqrt(2)/2 appears to be the node's distance from
        # center expressed in track_width units (nodes sit at +/- tw/2 on both
        # axes), but it is hard-coded rather than derived from node_x/node_y
        # -- confirm the intended scaling of the rotational component.
        r = math.sqrt(2) / 2
        sx = dx + r * d_theta * tangent_x
        sy = dy + r * d_theta * tangent_y

        sx_u = sx.asNumber(m/s)
        sy_u = sy.asNumber(m/s)
        theta = math.atan2(sy_u, sx_u) * rad
        magnitude = math.sqrt(sx_u ** 2 + sy_u ** 2) * m/s

        return magnitude, theta
from robotpy_toolkit_7407.unum import Unum
from robotpy_toolkit_7407.command import SubsystemCommand, T
from robotpy_toolkit_7407.motors.ctre_motors import talon_sensor_vel_unit
from robotpy_toolkit_7407.subsystem_templates.drivetrain.differential_drivetrain import DifferentialDrivetrain
from robotpy_toolkit_7407.utils.math import clamp, sensor_units_to_inches, inches_to_sensor_units
from robotpy_toolkit_7407.utils.units import m, s
# TODO Redo this to make it like swerve drivetrain commands
class DriveArcade(SubsystemCommand[DifferentialDrivetrain]):
    """Arcade-style teleop drive command for a differential drivetrain.

    Every loop it reads the subsystem's x/y joystick axes, applies a dead
    zone, converts the stick position into left/right wheel velocities via a
    turn-radius model, and commands the drivetrain in Talon sensor units.
    """

    def __init__(self, subsystem: T, track_width_inches: float):
        super().__init__(subsystem)
        self.track_width_inches = track_width_inches

    def initialize(self) -> None:
        pass

    def execute(self) -> None:
        raw_x = self.subsystem.axis_x.value
        raw_y = self.subsystem.axis_y.value
        steer, throttle = self._add_dead_zones(raw_x, raw_y)
        left, right = self._turn_radius_drive(steer, throttle, self.track_width_inches)
        # Right side is negated to account for the mirrored gearbox.
        self.subsystem.set_motor_velocity(
            left * talon_sensor_vel_unit, -right * talon_sensor_vel_unit
        )

    def end(self, interrupted: bool) -> None:
        self.subsystem.set_motor_velocity(0 * m/s, 0 * m/s)

    def isFinished(self) -> bool:
        return False

    def runsWhenDisabled(self) -> bool:
        return False

    @staticmethod
    def _add_dead_zones(x_axis: float, y_axis: float) -> tuple[float, float]:
        """Zero out each axis whose magnitude is inside the 0.2 dead band."""
        dead_band = 0.2
        x_axis = 0 if abs(x_axis) < dead_band else x_axis
        y_axis = 0 if abs(y_axis) < dead_band else y_axis
        return x_axis, y_axis

    @staticmethod
    def _arcade_drive(x_axis: float, y_axis: float) -> tuple[float, float]:
        """Classic arcade mix: throttle +/- steer, each clamped to [-1, 1]."""
        return (
            clamp(y_axis + x_axis, -1, 1),
            clamp(y_axis - x_axis, -1, 1),
        )

    @staticmethod
    def _turn_radius_drive(x_axis: float, y_axis: float, track_width_inches: float) -> tuple[float, float]:
        """Map stick position to (left, right) velocities in sensor units.

        The y axis sets the forward speed (full scale = 18000 sensor units);
        the x axis picks a turn radius, shrinking toward a point turn as the
        stick approaches full deflection.
        """
        # Mirror steering when driving forward so turns feel consistent.
        if y_axis > 0:
            x_axis = -x_axis

        target_velocity = sensor_units_to_inches(18000 * y_axis, True)

        # Turn radius shrinks linearly with stick deflection (sign = side).
        if x_axis > 0:
            turn_radius = 120.0 * (1.05 - x_axis)
        elif x_axis < 0:
            turn_radius = 120.0 * (-1.05 - x_axis)
        else:
            turn_radius = 0

        if x_axis == 0:
            velocity_difference = 0
        elif target_velocity == 0 or turn_radius == 0:
            # Stationary (or point) turn: spin in place proportional to x.
            velocity_difference = sensor_units_to_inches(-18000.0 * x_axis, True)
        else:
            velocity_difference = (track_width_inches * target_velocity) / turn_radius

        left = clamp(
            inches_to_sensor_units(target_velocity - velocity_difference, True),
            -18000, 18000,
        )
        right = clamp(
            inches_to_sensor_units(target_velocity + velocity_difference, True),
            -18000, 18000,
        )
        return left, right
import inspect
import logging.config
import os
from robotpy_toolkit_7407.utils.color import Color, NoColor
"""
Logger utility for debugging.
Example usage:
utils.logger.Logger.log_info("testing")
"""
def get_default_logging():
    """Return the default dictConfig-style logging configuration.

    Timestamps are rendered red and level names purple (via utils.color.Color);
    this package's logger writes INFO+ to stdout, while the root logger and
    the uvicorn loggers route through stderr/stdout handlers as configured.
    """
    standard_format = (
        Color.RED
        + "%(asctime)s,%(msecs)d"
        + Color.END
        + Color.PURPLE
        + " %(levelname)-8s"
        + Color.END
        + " %(message)s"
    )
    handlers = {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "standard",
            "level": "INFO",
            "stream": "ext://sys.stdout",
        },
        "default": {
            "formatter": "standard",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",
        },
        "access": {
            "formatter": "standard",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
        },
    }
    loggers = {
        __name__: {"level": "INFO", "handlers": ["console"], "propagate": False,},
        "": {"handlers": ["default"], "level": "INFO"},
        "uvicorn.error": {"handlers": ["default"], "level": "INFO", "propagate": False},
        "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": False},
    }
    return {
        "version": 1,
        "formatters": {
            "standard": {
                "format": standard_format,
                "datefmt": "%Y-%m-%d:%H:%M:%S",
            },
        },
        "handlers": handlers,
        "loggers": loggers,
    }
class Logger:
    """Colored console logger that prefixes messages with file, line and the
    chain of in-project calling functions.

    Use the classmethods (log_info/log_error/log_warning) or the module-level
    aliases; each classmethod call constructs a fresh Logger.
    """

    def __init__(self, logging_config=None):
        """Apply `logging_config` (or the default) via dictConfig.

        NOTE(review): every classmethod below calls cls(), so dictConfig is
        re-applied on every single log call -- confirm that is intended; it
        also resets MAX_FILENAME_LENGTH each time, so the column alignment
        never actually accumulates across calls.
        """
        if logging_config is None:
            logging_config = get_default_logging()
        logging.config.dictConfig(logging_config)
        self.MAX_FILENAME_LENGTH = 0
        # Package root: two directories above this file.
        self.root_folder = os.path.dirname(
            os.path.dirname(inspect.currentframe().f_code.co_filename)
        )

    def _log_function(
        self, func, msg: str, header=None, frame=None, traceback_length=5
    ) -> str:
        """
        Internal colored logging function.

        Builds "[file:line] (caller chain) header msg", emits it through
        `func` (a logging method), and returns the formatted string.
        """
        if not frame:
            frame = inspect.currentframe().f_back
        file_name = os.path.basename(frame.f_code.co_filename)
        line_no = str(frame.f_lineno)
        # Walk up to traceback_length frames, collecting the names of
        # functions whose file lives directly in the package root folder.
        caller = ""
        for i in range(traceback_length):
            if frame is None:
                break
            temp_folder_name = os.path.dirname(frame.f_code.co_filename)
            if self.root_folder == temp_folder_name:
                caller = "(" + frame.f_code.co_name + ") " + caller
                frame = frame.f_back
            else:
                # NOTE(review): this branch does not advance `frame`, so the
                # first out-of-project frame makes the remaining iterations
                # no-ops -- looks like it should advance or break; confirm.
                continue
        if caller:
            msg = Color.GREEN + caller + Color.END + msg
        if header:
            msg = Color.YELLOW + header + Color.END + " " + msg
        # Left-pad the [file:line] column to the widest width seen so far.
        filename_display = " [" + file_name + ":" + line_no + "] "
        if len(filename_display) > self.MAX_FILENAME_LENGTH:
            self.MAX_FILENAME_LENGTH = len(filename_display)
        msg = (
            Color.CYAN
            + filename_display.ljust(self.MAX_FILENAME_LENGTH)
            + Color.END
            + msg
        )
        func(msg)
        return msg

    @classmethod
    def log_info(
        cls, msg: str, header=None, frame=None, traceback_length: int = 5
    ) -> str:
        """
        Logs info
        """
        log = logging.getLogger(__name__)
        if frame:
            frame = frame.f_back
        else:
            frame = inspect.currentframe().f_back
        return cls()._log_function(log.info, str(msg), header, frame, traceback_length)

    @classmethod
    def log_error(
        cls, msg: str, header=None, frame=None, traceback_length: int = 5
    ) -> str:
        """
        Logs errors
        """
        log = logging.getLogger(__name__)
        if frame:
            frame = frame.f_back
        else:
            frame = inspect.currentframe().f_back
        return cls()._log_function(log.error, str(msg), header, frame, traceback_length)

    @classmethod
    def log_warning(
        cls, msg: str, header=None, frame=None, traceback_length: int = 5
    ) -> str:
        """
        Logs warnings
        """
        log = logging.getLogger(__name__)
        if frame:
            frame = frame.f_back
        else:
            frame = inspect.currentframe().f_back
        return cls()._log_function(log.warning, str(msg), header, frame, traceback_length)

    @classmethod
    def print_function_call(cls, params=None, header="") -> str:
        """
        Prints function calls and details associated with the call
        """
        frame = inspect.currentframe().f_back
        if params:
            return cls().log_info(
                "Called "
                + inspect.getmodule(frame).__name__
                + "."
                + frame.f_code.co_name
                + " with parameters: "
                + str(params),
                header,
                frame,
            )
        else:
            return cls().log_info(
                "Called "
                + inspect.getmodule(frame).__name__
                + "."
                + frame.f_code.co_name,
                header,
                frame,
            )
# Module-level conveniences: bound classmethods, so callers can write
# logger.info(...) / logger.error(...) without touching the Logger class.
info = Logger.log_info
error = Logger.log_error
warning = Logger.log_warning
warn = warning
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
import ctre
from robotpy_toolkit_7407.unum import Unum
from robotpy_toolkit_7407.motor import PIDMotor
from robotpy_toolkit_7407.utils.units import rad, rev, s
@dataclass
class TalonConfig:
    """Optional configuration for a CTRE Talon/Victor; None fields are skipped.

    Applied by _Talon._set_config() during init().
    """

    k_P: Optional[float] = None  # closed-loop proportional gain (slot 0)
    k_I: Optional[float] = None  # closed-loop integral gain
    k_D: Optional[float] = None  # closed-loop derivative gain
    k_F: Optional[float] = None  # closed-loop feed-forward gain
    closed_loop_peak_output: Optional[float] = None
    motion_cruise_velocity: Optional[Unum] = None  # converted to sensor units/100ms
    motion_acceleration: Optional[Unum] = None  # converted to sensor units/100ms/s
    neutral_brake: Optional[bool] = None  # True=Brake, False=Coast
    integral_zone: Optional[float] = None
    max_integral_accumulator: Optional[float] = None
# Talon integrated-encoder units: 2048 sensor ticks per revolution, and
# velocities reported per 100 ms window (CTRE convention).
talon_sensor_unit = Unum.unit("talon_sensor_u", rev / 2048, "talon sensor unit")
hundred_ms = Unum.unit("100ms", s / 10, "100 milliseconds")
talon_sensor_vel_unit = talon_sensor_unit / hundred_ms
talon_sensor_accel_unit = talon_sensor_vel_unit / s
class _Talon(PIDMotor):
    """Common base for CTRE motor controllers.

    The constructor only stores parameters; the hardware object is created by
    the subclass's init() (lazy init, so instances can be declared before the
    robot is ready).
    """

    _motor: ctre.BaseTalon

    def __init__(self, can_id: int, inverted: bool = False, config: TalonConfig = None):
        super().__init__()
        self._can_id = can_id
        self._config = config
        self._inverted = inverted

    def get_sensor_position(self) -> Unum:
        """Selected sensor position (PID slot 0) as a Unum."""
        return self._motor.getSelectedSensorPosition(0) * talon_sensor_unit

    def set_sensor_position(self, pos: Unum):
        """Overwrite the selected sensor position (e.g. to zero an encoder)."""
        self._motor.setSelectedSensorPosition(pos.asNumber(talon_sensor_unit))

    def get_sensor_velocity(self) -> Unum:
        """Selected sensor velocity (PID slot 0) as a Unum."""
        return self._motor.getSelectedSensorVelocity(0) * talon_sensor_vel_unit

    def set_raw_output(self, x: float):
        """Open-loop percent output in [-1, 1]."""
        self._motor.set(ctre.ControlMode.PercentOutput, x)

    def set_target_position(self, pos: Unum):
        """Closed-loop Motion Magic move to `pos`."""
        self._motor.set(ctre.ControlMode.MotionMagic, pos.asNumber(talon_sensor_unit))

    def set_target_velocity(self, vel: Unum):
        """Closed-loop velocity control at `vel`."""
        self._motor.set(ctre.ControlMode.Velocity, vel.asNumber(talon_sensor_vel_unit))

    def follow(self, master: _Talon):
        """Mirror the output of `master` (CTRE follower mode)."""
        self._motor.follow(master._motor)

    def _set_config(self, config: Optional[TalonConfig]):
        # Apply only the fields that were provided; everything targets slot 0.
        if config is None:
            return
        if config.k_P is not None:
            self._motor.config_kP(0, config.k_P)
        if config.k_I is not None:
            self._motor.config_kI(0, config.k_I)
        if config.k_D is not None:
            self._motor.config_kD(0, config.k_D)
        if config.k_F is not None:
            self._motor.config_kF(0, config.k_F)
        if config.closed_loop_peak_output is not None:
            self._motor.configClosedLoopPeakOutput(0, config.closed_loop_peak_output)
        if config.motion_cruise_velocity is not None:
            self._motor.configMotionCruiseVelocity(config.motion_cruise_velocity.asNumber(talon_sensor_vel_unit))
        if config.motion_acceleration is not None:
            self._motor.configMotionAcceleration(config.motion_acceleration.asNumber(talon_sensor_accel_unit))
        if config.neutral_brake is not None:
            self._motor.setNeutralMode(ctre.NeutralMode.Brake if config.neutral_brake else ctre.NeutralMode.Coast)
        if config.integral_zone is not None:
            self._motor.config_IntegralZone(0, config.integral_zone)
        if config.max_integral_accumulator is not None:
            self._motor.configMaxIntegralAccumulator(0, config.max_integral_accumulator)
class TalonFX(_Talon):
    """Talon FX (Falcon 500) controller; init() creates the hardware object."""

    def init(self):
        self._motor = ctre.TalonFX(self._can_id)
        self._set_config(self._config)
        self._motor.setInverted(self._inverted)
class TalonSRX(_Talon):
    """Talon SRX controller; init() creates the hardware object."""

    def init(self):
        self._motor = ctre.TalonSRX(self._can_id)
        self._set_config(self._config)
        self._motor.setInverted(self._inverted)
class VictorSPX(_Talon):
    """Victor SPX controller; init() creates the hardware object."""

    def init(self):
        self._motor = ctre.VictorSPX(self._can_id)
        self._set_config(self._config)
        self._motor.setInverted(self._inverted)
class TalonGroup(PIDMotor):
    """A set of Talons acting as one motor: one leader, the rest followers.

    All PIDMotor operations are forwarded to the current leader; the shared
    `config` replaces each member's own config.
    """

    motors: list[_Talon]

    def __init__(self, *motors: _Talon, config: TalonConfig = None, leader_idx: int = 0):
        super().__init__()
        self.motors = list(motors)
        # NOTE: overwrites any config the individual motors were built with.
        for m in self.motors:
            m._config = config
        self._leader_idx = leader_idx

    def init(self):
        """Initialize the leader first, then the followers."""
        self.motors[self._leader_idx].init()
        for idx, motor in enumerate(self.motors):
            if idx != self._leader_idx:
                motor.init()
                motor.follow(self.motors[self._leader_idx])

    def set_leader_idx(self, idx: int):
        """Promote motor `idx` to leader and re-point all others at it."""
        self._leader_idx = idx

        # self.motors[self._leader_idx].set_raw_output(0)  # Maybe?

        for idx, motor in enumerate(self.motors):
            if idx != self._leader_idx:
                motor.follow(self.motors[self._leader_idx])

    def get_sensor_position(self) -> Unum:
        return self.motors[self._leader_idx].get_sensor_position()

    def set_sensor_position(self, pos: Unum):
        self.motors[self._leader_idx].set_sensor_position(pos)

    def get_sensor_velocity(self) -> Unum:
        return self.motors[self._leader_idx].get_sensor_velocity()

    def set_raw_output(self, x: float):
        self.motors[self._leader_idx].set_raw_output(x)

    def set_target_position(self, pos: Unum):
        self.motors[self._leader_idx].set_target_position(pos)

    def set_target_velocity(self, vel: Unum):
        self.motors[self._leader_idx].set_target_velocity(vel)
from dataclasses import dataclass
from typing import Optional
from rev import CANSparkMax, SparkMaxPIDController, SparkMaxRelativeEncoder
from robotpy_toolkit_7407.unum import Unum
from robotpy_toolkit_7407.motor import PIDMotor
from robotpy_toolkit_7407.utils.units import rev, minute
@dataclass
class SparkMaxConfig:
    """Optional configuration for a REV SPARK MAX; None fields are skipped."""

    k_P: Optional[float] = None  # closed-loop proportional gain
    k_I: Optional[float] = None  # closed-loop integral gain
    k_D: Optional[float] = None  # closed-loop derivative gain
    k_F: Optional[float] = None  # closed-loop feed-forward gain
    output_range: Optional[tuple[float, float]] = None  # (min, max) output
    idle_mode: Optional[CANSparkMax.IdleMode] = None  # brake or coast
class SparkMax(PIDMotor):
    """REV SPARK MAX wrapper with lazy hardware init (like the Talon classes).

    Positions are exposed in revolutions and velocities in rev/minute (the
    native units of the SPARK MAX relative encoder).
    """

    _motor: CANSparkMax
    __pid_controller: SparkMaxPIDController
    __encoder: SparkMaxRelativeEncoder

    def __init__(self, can_id: int, inverted: bool = True, brushless: bool = True, config: SparkMaxConfig = None):
        # NOTE(review): inverted defaults to True here, unlike the Talon
        # wrapper (False) -- confirm this asymmetry is intentional.
        super().__init__()
        self._can_id = can_id
        self._inverted = inverted
        self._brushless = brushless
        self._config = config

    def init(self):
        """Create the hardware object and apply the stored configuration."""
        self._motor = CANSparkMax(
            self._can_id,
            CANSparkMax.MotorType.kBrushless if self._brushless else CANSparkMax.MotorType.kBrushed
        )
        self._motor.setInverted(self._inverted)
        self.__pid_controller = self._motor.getPIDController()
        self.__encoder = self._motor.getEncoder()
        self._set_config(self._config)

    def set_raw_output(self, x: float):
        """Open-loop percent output in [-1, 1]."""
        self._motor.set(x)

    def set_target_position(self, pos: Unum):
        """Closed-loop position control; `pos` converted to revolutions."""
        self.__pid_controller.setReference(pos.asNumber(rev), CANSparkMax.ControlType.kPosition)

    def set_target_velocity(self, vel: Unum):
        """Closed-loop velocity control; `vel` converted to rev/minute."""
        self.__pid_controller.setReference(vel.asNumber(rev / minute), CANSparkMax.ControlType.kVelocity)

    def get_sensor_position(self) -> Unum:
        return self.__encoder.getPosition() * rev

    def set_sensor_position(self, pos: Unum):
        self.__encoder.setPosition(pos.asNumber(rev))

    def get_sensor_velocity(self) -> Unum:
        return self.__encoder.getVelocity() * (rev / minute)

    def _set_config(self, config: SparkMaxConfig):
        # Apply only the fields that were provided.
        if config is None:
            return
        if config.k_P is not None:
            self.__pid_controller.setP(config.k_P)
        if config.k_I is not None:
            self.__pid_controller.setI(config.k_I)
        if config.k_D is not None:
            self.__pid_controller.setD(config.k_D)
        if config.k_F is not None:
            self.__pid_controller.setFF(config.k_F)
        if config.output_range is not None:
            self.__pid_controller.setOutputRange(config.output_range[0], config.output_range[1])
        if config.idle_mode is not None:
            self._motor.setIdleMode(config.idle_mode)
import logging
from typing import Any, Dict, Optional
logger = logging.getLogger(__name__)
class MagicInjectError(ValueError):
    """Raised when automatic ("magic") variable injection into a component
    fails: a requested injectable is missing or has an incompatible type."""

    pass
def get_injection_requests(
    type_hints: Dict[str, type], cname: str, component: Optional[Any] = None
) -> Dict[str, type]:
    """
    Given a dict of type hints, filter it to the requested injection types.

    :param type_hints: The type hints to inspect.
    :param cname: The component name.
    :param component: The component if it has been instantiated.
    :raises MagicInjectError: if a private name appears on an ``__init__``.
    :raises TypeError: if an annotation is not an actual type.
    """
    requests = {}
    for attr_name, hint in type_hints.items():
        # Private names are never injected; on an __init__ signature
        # (no component instance yet) that is an outright error.
        if attr_name.startswith("_"):
            if component is None:
                message = f"Cannot inject into component {cname} __init__ param {attr_name}"
                raise MagicInjectError(message)
            continue

        # Already assigned (e.g. in the constructor): nothing to inject.
        if component is not None and hasattr(component, attr_name):
            continue

        # Unwrap typing-module generics (e.g. List[int] -> list).
        unwrapped = getattr(hint, "__origin__", None)
        if unwrapped is not None:
            hint = unwrapped

        # A non-type annotation cannot be matched against an injectable.
        if not isinstance(hint, type):
            raise TypeError(
                f"Component {cname} has a non-type annotation {attr_name}: {hint}\n"
                "Lone non-injection variable annotations are disallowed, did you want to assign a static variable?"
            )

        requests[attr_name] = hint

    return requests
def find_injections(
    requests: Dict[str, type], injectables: Dict[str, Any], cname: str
) -> Dict[str, Any]:
    """
    Get a dict of the variables to inject into a given component.

    :param requests: The mapping of requested variables to types,
                     as returned by :func:`get_injection_requests`.
    :param injectables: The available variables to inject.
    :param cname: The name of the component.
    :raises MagicInjectError: if an injectable is missing or mistyped.
    """
    to_inject = {}
    for attr_name, attr_type in requests.items():
        # Look up by plain name first, then by the component-prefixed name.
        value = injectables.get(attr_name)
        if value is None:
            value = injectables.get(f"{cname}_{attr_name}")

        # Injectable syntax was used but nothing matching exists on the robot.
        if value is None:
            raise MagicInjectError(
                "Component %s has variable %s (type %s), which is absent from robot"
                % (cname, attr_name, attr_type)
            )

        # The injectable exists but is not of the declared type.
        if not isinstance(value, attr_type):
            raise MagicInjectError(
                "Component %s variable %s does not match type in robot! (Got %s, expected %s)"
                % (cname, attr_name, type(value), attr_type)
            )

        to_inject[attr_name] = value
        logger.debug("-> %s.%s = %s", cname, attr_name, value)

    return to_inject
import logging
class MagicComponent:
    """
    To automagically retrieve variables defined in your base robot
    object, you can add the following::

        class MyComponent:

            # other variables 'imported' automatically from MagicRobot
            elevator_motor: Talon
            other_component: MyOtherComponent

            ...

            def execute(self):
                # This will be automatically set to the Talon
                # instance created in robot.py
                self.elevator_motor.set(self.value)

    What this says is "find the variable in the robot class called
    'elevator_motor', which is a Talon". If the name and type match,
    then the variable will automatically be injected into your
    component when it is created.

    .. note:: You don't need to inherit from ``MagicComponent``, it is only
              provided for documentation's sake
    """

    # NOTE(review): presumably supplied to each component by the framework
    # at construction time, like the injected variables above -- confirm.
    logger: logging.Logger

    def setup(self) -> None:
        """
        This function is called after ``createObjects`` has been called in
        the main robot class, and after all components have been created

        The setup function is optional and components do not have to define
        one. ``setup()`` functions are called in order of component definition
        in the main robot class.

        .. note:: For technical reasons, variables imported from
                  MagicRobot are not initialized when your component's
                  constructor is called. However, they will be initialized
                  by the time this function is called.
        """

    def on_enable(self) -> None:
        """
        Called when the robot enters autonomous or teleoperated mode. This
        function should initialize your component to a "safe" state so
        that unexpected things don't happen when enabling the robot.

        .. note:: You'll note that there isn't a separate initialization
                  function for autonomous and teleoperated modes. This is
                  intentional, as they should be the same.
        """

    def on_disable(self) -> None:
        """
        Called when the robot leaves autonomous or teleoperated
        """

    def execute(self) -> None:
        """
        This function is called at the end of the control loop
        """
from functools import partial
from typing import Optional

import wpilib
class Toggle:
    """Utility class for joystick button toggle

    Usage::

        foo = Toggle(joystick, 3)

        if foo:
            toggleFunction()

        if foo.on:
            onToggle()

        if foo.off:
            offToggle()
    """

    class _SteadyDebounce:
        """
        Similar to ButtonDebouncer, but the output stays steady for
        the given period. E.g, if you set the period to 2
        and press the button, the value will return true for 2 seconds.

        Steady debounce will return true for the given period, allowing it to
        be used with Toggle.
        """

        def __init__(self, joystick: wpilib.Joystick, button: int, period: float):
            """
            :param joystick: Joystick object
            :type joystick: :class:`wpilib.Joystick`
            :param button: Number of button to retrieve
            :type button: int
            :param period: Period of time (in seconds) to wait before allowing
                           new button presses.
            :type period: float
            """
            self.joystick = joystick
            self.button = button
            self.debounce_period = float(period)

            # Negative latest prevents get() from returning True until the
            # joystick is pressed for the first time
            self.latest = -self.debounce_period

            # NOTE(review): never read anywhere; kept for backwards
            # compatibility with external code that may inspect it
            self.enabled = False

        def get(self) -> bool:
            """
            :returns: The value of the joystick button. Once the button is
                      pressed, the return value will be `True` until the
                      debounce period expires.
            """
            now = wpilib.Timer.getFPGATimestamp()
            if now - self.latest < self.debounce_period:
                return True

            if self.joystick.getRawButton(self.button):
                self.latest = now
                return True
            else:
                return False

    def __init__(
        self,
        joystick: wpilib.Joystick,
        button: int,
        debounce_period: Optional[float] = None,
    ):
        """
        :param joystick: :class:`wpilib.Joystick` that contains the button to toggle
        :param button: Number of button that will act as toggle. Same value used
                       in `getRawButton()`
        :param debounce_period: Period in seconds to wait before registering a
                                new button press; if None, no debouncing is done.
        """
        if debounce_period is not None:
            self.joystickget = Toggle._SteadyDebounce(
                joystick, button, debounce_period
            ).get
        else:
            self.joystick = joystick
            self.joystickget = partial(self.joystick.getRawButton, button)

        self.released = False
        self.toggle = False
        self.state = False

    def get(self) -> bool:
        """
        :return: State of toggle
        :rtype: bool
        """
        current_state = self.joystickget()

        # Flip the toggle exactly once per press (on the rising edge)
        if current_state and not self.released:
            self.released = True
            self.toggle = not self.toggle
            self.state = not self.state  # Tracks the same on/off flag as toggle
        elif not current_state and self.released:
            self.released = False

        return self.toggle

    @property
    def on(self):
        """
        Equates to true if toggle is in the 'on' state
        """
        self.get()
        return self.state

    @property
    def off(self):
        """
        Equates to true if toggle is in the 'off' state
        """
        self.get()
        return not self.state

    __bool__ = get
class Unit(object):
    """A single measurement unit, described by its conversions to and from a
    base unit."""

    def __init__(self, base_unit, base_to_unit, unit_to_base):
        """
        Unit constructor, used as a mechanism to convert between various
        measurements.

        :param base_unit: The instance of Unit to base conversions from; if
                          None, this unit is the ultimate base unit
        :param base_to_unit: A callable converting a base-unit value into this
                             unit
        :param unit_to_base: A callable converting a value in this unit into
                             the base unit
        """
        self.base_unit, self.base_to_unit, self.unit_to_base = (
            base_unit,
            base_to_unit,
            unit_to_base,
        )
def convert(source_unit, target_unit, value):
    """
    Convert between units, returns value in target_unit

    :param source_unit: The unit of value
    :param target_unit: The desired output unit
    :param value: The value, in source_unit, to convert
    """
    # Walk from the source unit down to the ultimate base unit,
    # converting as we go.
    unit = source_unit
    result = value
    while unit.base_unit is not None:
        result = unit.unit_to_base(result)
        unit = unit.base_unit

    # Record the chain of units from the target back up to the base...
    chain = []
    unit = target_unit
    while unit.base_unit is not None:
        chain.append(unit)
        unit = unit.base_unit

    # ...then apply the base->unit conversions outward to reach target_unit.
    for step in reversed(chain):
        result = step.base_to_unit(result)

    return result
# Some typical units to be used.
#
# The root ``meter`` unit's converters are identity functions: convert()
# never invokes them (its loops stop at the unit whose base_unit is None),
# but identity is the only correct conversion for the base unit — the
# previous ``lambda x: None`` would have silently produced None if called.
meter = Unit(base_unit=None, base_to_unit=lambda x: x, unit_to_base=lambda x: x)
centimeter = Unit(
    base_unit=meter, base_to_unit=lambda x: x * 100, unit_to_base=lambda x: x / 100
)
foot = Unit(
    base_unit=meter,
    base_to_unit=lambda x: x / 0.3048,
    unit_to_base=lambda x: x * 0.3048,
)
inch = Unit(
    base_unit=foot, base_to_unit=lambda x: x * 12, unit_to_base=lambda x: x / 12
)
import math
import wpilib
from wpilib.simulation import AnalogInputSim
from .distance_sensors import SharpIR2Y0A02, SharpIR2Y0A21, SharpIR2Y0A41
class SharpIR2Y0A02Sim:
    """
    An easy to use simulation interface for a Sharp GP2Y0A02YK0F
    """

    def __init__(self, sensor: SharpIR2Y0A02) -> None:
        assert isinstance(sensor, SharpIR2Y0A02)
        self._distance = 0
        self._sim = AnalogInputSim(sensor.distance)

    def getDistance(self) -> float:
        """Get set distance (not distance sensor sees) in centimeters"""
        return self._distance

    def setDistance(self, d) -> None:
        """Set distance in centimeters"""
        self._distance = d
        # Drive the analog channel with the inverse of the sensor's
        # distance curve (d = 62.28 * v^-1.092), using the clamped distance
        clamped = min(max(d, 22.5), 145.0)
        voltage = math.pow(clamped / 62.28, 1 / -1.092)
        self._sim.setVoltage(voltage)
class SharpIR2Y0A21Sim:
    """
    An easy to use simulation interface for a Sharp GP2Y0A21YK0F
    """

    def __init__(self, sensor: SharpIR2Y0A21) -> None:
        assert isinstance(sensor, SharpIR2Y0A21)
        self._distance = 0
        self._sim = AnalogInputSim(sensor.distance)

    def getDistance(self) -> float:
        """Get set distance (not distance sensor sees) in centimeters"""
        return self._distance

    def setDistance(self, d) -> None:
        """Set distance in centimeters"""
        self._distance = d
        # Drive the analog channel with the inverse of the sensor's
        # distance curve (d = 26.449 * v^-1.226), using the clamped distance
        clamped = min(max(d, 10.0), 80.0)
        voltage = math.pow(clamped / 26.449, 1 / -1.226)
        self._sim.setVoltage(voltage)
class SharpIR2Y0A41Sim:
    """
    An easy to use simulation interface for a Sharp GP2Y0A41SK0F
    """

    def __init__(self, sensor: SharpIR2Y0A41) -> None:
        assert isinstance(sensor, SharpIR2Y0A41)
        self._distance = 0
        self._sim = AnalogInputSim(sensor.distance)

    def getDistance(self) -> float:
        """Get set distance (not distance sensor sees) in centimeters"""
        return self._distance

    def setDistance(self, d) -> None:
        """Set distance in centimeters"""
        self._distance = d
        # Drive the analog channel with the inverse of the sensor's
        # distance curve (d = 12.84 * v^-0.9824), using the clamped distance
        clamped = min(max(d, 4.5), 35.0)
        voltage = math.pow(clamped / 12.84, 1 / -0.9824)
        self._sim.setVoltage(voltage)
import wpilib
import math
class SharpIR2Y0A02:
    """
    Sharp GP2Y0A02YK0F is an analog IR sensor capable of measuring
    distances from 20cm to 150cm. Output distance is measured in
    centimeters.

    Distance is calculated using the following equation derived from
    the graph provided in the datasheet::

        62.28*x ^ -1.092

    .. warning:: FRC Teams: the case on these sensors is conductive and
                 grounded, and should not be mounted on a metallic
                 surface!
    """

    def __init__(self, port):
        """:param port: Analog port number"""
        self.distance = wpilib.AnalogInput(port)

    def getDistance(self):
        """
        :returns: distance in centimeters, constrained to lie between
                  22.5 and 145
        """
        # Keep the voltage strictly positive so the power-law curve is defined
        voltage = self.distance.getVoltage()
        if voltage < 0.00001:
            voltage = 0.00001

        raw = 62.28 * math.pow(voltage, -1.092)

        # Constrain output to the sensor's usable range
        return min(max(raw, 22.5), 145.0)
class SharpIR2Y0A21:
    """
    Sharp GP2Y0A21YK0F is an analog IR sensor capable of measuring
    distances from 10cm to 80cm. Output distance is measured in
    centimeters.

    Distance is calculated using the following equation derived from
    the graph provided in the datasheet::

        26.449*x ^ -1.226

    .. warning:: FRC Teams: the case on these sensors is conductive and
                 grounded, and should not be mounted on a metallic
                 surface!
    """

    def __init__(self, port):
        """:param port: Analog port number"""
        self.distance = wpilib.AnalogInput(port)

    def getDistance(self):
        """
        :returns: distance in centimeters, constrained to lie between
                  10 and 80
        """
        # Keep the voltage strictly positive so the power-law curve is defined
        voltage = self.distance.getVoltage()
        if voltage < 0.00001:
            voltage = 0.00001

        raw = 26.449 * math.pow(voltage, -1.226)

        # Constrain output to the sensor's usable range
        return min(max(raw, 10.0), 80.0)
class SharpIR2Y0A41:
    """
    Sharp GP2Y0A41SK0F is an analog IR sensor capable of measuring
    distances from 4cm to 40cm. Output distance is measured in
    centimeters.

    Distance is calculated using the following equation derived from
    the graph provided in the datasheet::

        12.84*x ^ -0.9824

    .. warning:: FRC Teams: the case on these sensors is conductive and
                 grounded, and should not be mounted on a metallic
                 surface!
    """

    def __init__(self, port):
        """:param port: Analog port number"""
        self.distance = wpilib.AnalogInput(port)

    def getDistance(self):
        """
        :returns: distance in centimeters, constrained to lie between
                  4.5 and 35
        """
        # Keep the voltage strictly positive so the power-law curve is defined
        voltage = self.distance.getVoltage()
        if voltage < 0.00001:
            voltage = 0.00001

        raw = 12.84 * math.pow(voltage, -0.9824)

        # Constrain output to the sensor's usable range
        return min(max(raw, 4.5), 35.0)
# Backwards-compatible alias for the old name of this class
SharpIRGP2Y0A41SK0F = SharpIR2Y0A41
import wpilib
from . import driver_base
from . import units
class MaxSonarEZPulseWidth(driver_base.DriverBase):
    """
    This is a driver for the MaxSonar EZ series of sonar sensors, using the
    pulse-width output of the sensor.

    To use this driver, pin 2 on the sensor must be mapped to a dio pin.
    """

    verified = True

    def __init__(self, channel, output_units=units.inch):
        """Sonar sensor constructor

        :param channel: The digital input index which is wired to the
                        pulse-width output pin (pin 2) on the sensor.
        :param output_units: The Unit instance specifying the format of value
                             to return
        """
        self.output_units = output_units

        # Measure the width of the high pulse on the digital input
        self.counter = wpilib.Counter(channel)
        self.counter.setSemiPeriodMode(highSemiPeriod=True)

        super().__init__()

    def get(self):
        """Return the current sonar sensor reading, in the units specified
        from the constructor"""
        # Pulse width scales at 0.000147 seconds (147 us) per inch
        pulse_inches = self.counter.getPeriod() / 0.000147
        return units.convert(units.inch, self.output_units, pulse_inches)
class MaxSonarEZAnalog(driver_base.DriverBase):
    """
    This is a driver for the MaxSonar EZ series of sonar sensors, using the
    analog output of the sensor.

    To use this driver, pin 3 on the sensor must be mapped to an analog pin,
    and the sensor must be on a 5v supply.
    """

    # This code has actually never been run, so it is extra not-verified!
    verified = False

    def __init__(self, channel, output_units=units.inch):
        """Sonar sensor constructor

        :param channel: The analog input index which is wired to the analog
                        output pin (pin 3) on the sensor.
        :param output_units: The Unit instance specifying the format of value
                             to return
        """
        self.output_units = output_units

        # Read the sensor's analog output directly
        self.analog = wpilib.AnalogInput(channel)

        super().__init__()

    def get(self):
        """Return the current sonar sensor reading, in the units specified
        from the constructor"""
        # 0.0049 volts per centimeter (assumes the 5v supply noted above)
        volts = self.analog.getVoltage()
        return units.convert(units.centimeter, self.output_units, volts / 0.0049)
import wpilib
import logging
from typing import List, Tuple
# Module-level logger used by SimpleWatchdog.printIfExpired
logger = logging.getLogger("simple_watchdog")

__all__ = ["SimpleWatchdog"]
class SimpleWatchdog:
    """A class that's a wrapper around a watchdog timer.

    When the timer expires, a message is printed to the console and an optional
    user-provided callback is invoked.

    The watchdog is initialized disabled, so the user needs to call enable()
    before use.

    .. note:: This is a simpler replacement for the :class:`wpilib.Watchdog`,
              and should function mostly the same (except that this watchdog
              will not detect infinite loops).

    .. warning:: This watchdog is not threadsafe
    """

    # Used for timeout print rate-limiting
    kMinPrintPeriod = 1000000  # us

    def __init__(self, timeout: float):
        """Watchdog constructor.

        :param timeout: The watchdog's timeout in seconds with microsecond
                        resolution.
        """
        # All bookkeeping is done in integer microseconds of FPGA time
        self._get_time = wpilib.RobotController.getFPGATime
        self._startTime = 0  # us
        self._timeout = int(timeout * 1e6)  # us
        self._expirationTime = 0  # us
        self._lastTimeoutPrintTime = 0  # us (NOTE(review): never read)
        self._lastEpochsPrintTime = 0  # us
        self._epochs: List[Tuple[str, int]] = []

    def getTime(self) -> float:
        """Returns the time in seconds since the watchdog was last fed."""
        elapsed_us = self._get_time() - self._startTime
        return elapsed_us / 1e6

    def setTimeout(self, timeout: float) -> None:
        """Sets the watchdog's timeout and restarts the timer.

        :param timeout: The watchdog's timeout in seconds with microsecond
                        resolution.
        """
        self._epochs.clear()
        timeout_us = int(timeout * 1e6)
        self._timeout = timeout_us
        now = self._get_time()
        self._startTime = now
        self._expirationTime = now + timeout_us

    def getTimeout(self) -> float:
        """Returns the watchdog's timeout in seconds."""
        return self._timeout / 1e6

    def isExpired(self) -> bool:
        """Returns true if the watchdog timer has expired."""
        return self._get_time() > self._expirationTime

    def addEpoch(self, epochName: str) -> None:
        """
        Adds time since last epoch to the list printed by printIfExpired().

        Epochs are a way to partition the time elapsed so that when
        overruns occur, one can determine which parts of an operation
        consumed the most time.

        :param epochName: The name to associate with the epoch.
        """
        self._epochs.append((epochName, self._get_time()))

    def printIfExpired(self) -> None:
        """If the watchdog has expired, logs the epochs added so far and
        their times (rate-limited to once per kMinPrintPeriod)."""
        now = self._get_time()
        rate_ok = now - self._lastEpochsPrintTime > self.kMinPrintPeriod
        if now > self._expirationTime and rate_ok:
            self._lastEpochsPrintTime = now
            prev = self._startTime
            logger.warning("Watchdog not fed after %.6fs", (now - prev) / 1e6)

            lines = []
            for name, stamp in self._epochs:
                lines.append(f"\t{name}: {(stamp - prev) / 1e6:.6f}")
                prev = stamp
            logger.info("Epochs:\n%s", "\n".join(lines))

    def reset(self) -> None:
        """Resets the watchdog timer.

        This also enables the timer if it was previously disabled.
        """
        self.enable()

    def enable(self) -> None:
        """Enables the watchdog timer."""
        self._epochs.clear()
        now = self._get_time()
        self._startTime = now
        self._expirationTime = now + self._timeout

    def disable(self) -> None:
        """Disables the watchdog timer."""
        # .. this doesn't do anything
import math
kInchesPerFoot = 12.0
kMetersPerInch = 0.0254
kSecondsPerMinute = 60
kMillisecondsPerSecond = 1000
kKilogramsPerLb = 0.453592


def metersToFeet(meters: float) -> float:
    """Converts given meters to feet.

    :param meters: The meters to convert to feet.
    :returns: Feet converted from meters.
    """
    inches = metersToInches(meters)
    return inches / kInchesPerFoot


def feetToMeters(feet: float) -> float:
    """Converts given feet to meters.

    :param feet: The feet to convert to meters.
    :returns: Meters converted from feet.
    """
    total_inches = feet * kInchesPerFoot
    return inchesToMeters(total_inches)


def metersToInches(meters: float) -> float:
    """Converts given meters to inches.

    :param meters: The meters to convert to inches.
    :returns: Inches converted from meters.
    """
    return meters / kMetersPerInch


def inchesToMeters(inches: float) -> float:
    """Converts given inches to meters.

    :param inches: The inches to convert to meters.
    :returns: Meters converted from inches.
    """
    return inches * kMetersPerInch
# Converts given degrees to radians (alias of :func:`math.radians`).
degreesToRadians = math.radians

# Converts given radians to degrees (alias of :func:`math.degrees`).
radiansToDegrees = math.degrees
def radiansToRotations(radians: float) -> float:
    """Converts given radians to rotations.

    :param radians: The radians to convert.
    :returns: Rotations converted from radians.
    """
    # One full rotation is tau (2*pi) radians
    return radians / math.tau


def degreesToRotations(degrees: float) -> float:
    """Converts given degrees to rotations.

    :param degrees: The degrees to convert.
    :returns: Rotations converted from degrees.
    """
    # One full rotation is 360 degrees
    return degrees / 360


def rotationsToDegrees(rotations: float) -> float:
    """Converts given rotations to degrees.

    :param rotations: The rotations to convert.
    :returns: Degrees converted from rotations.
    """
    return rotations * 360


def rotationsToRadians(rotations: float) -> float:
    """Converts given rotations to radians.

    :param rotations: The rotations to convert.
    :returns: Radians converted from rotations.
    """
    return rotations * math.tau
def rotationsPerMinuteToRadiansPerSecond(rpm: float) -> float:
    """Converts rotations per minute to radians per second.

    :param rpm: The rotations per minute to convert to radians per second.
    :returns: Radians per second converted from rotations per minute.
    """
    rotations_per_second = rpm / kSecondsPerMinute
    return rotations_per_second * math.tau


def radiansPerSecondToRotationsPerMinute(radiansPerSecond: float) -> float:
    """Converts radians per second to rotations per minute.

    :param radiansPerSecond: The radians per second to convert to rotations
                             per minute.
    :returns: Rotations per minute converted from radians per second.
    """
    radians_per_minute = radiansPerSecond * kSecondsPerMinute
    return radians_per_minute / math.tau
def millisecondsToSeconds(milliseconds: float) -> float:
    """Converts given milliseconds to seconds.

    :param milliseconds: The milliseconds to convert to seconds.
    :returns: Seconds converted from milliseconds.
    """
    # 1000 milliseconds per second
    return milliseconds / kMillisecondsPerSecond


def secondsToMilliseconds(seconds: float) -> float:
    """Converts given seconds to milliseconds.

    :param seconds: The seconds to convert to milliseconds.
    :returns: Milliseconds converted from seconds.
    """
    return seconds * kMillisecondsPerSecond
def kilogramsToLbs(kilograms: float) -> float:
    """Converts kilograms into lbs (pound-mass).

    :param kilograms: The kilograms to convert to lbs (pound-mass).
    :returns: Lbs (pound-mass) converted from kilograms.
    """
    # 0.453592 kilograms per pound-mass
    return kilograms / kKilogramsPerLb


def lbsToKilograms(lbs: float) -> float:
    """Converts lbs (pound-mass) into kilograms.

    :param lbs: The lbs (pound-mass) to convert to kilograms.
    :returns: Kilograms converted from lbs (pound-mass).
    """
    return lbs * kKilogramsPerLb
from os.path import abspath, join, dirname
# Directory this package is installed into; all returned paths are under it
_root = abspath(dirname(__file__))

# Dotted module path of the compiled extension initializer
libinit_import = "wpimath._impl._init_wpimath_cpp"

# Names of packages this one depends on
depends = ['wpiutil']

# Name of this package on PyPI
pypi_package = 'robotpy-wpimath'


def get_include_dirs():
    """Return the header include directories shipped inside this package."""
    return [join(_root, "include"), join(_root, "rpy-include"), join(_root, "src"), join(_root, "src", "eigen"), join(_root, "src", "type_casters")]


def get_library_dirs():
    """Return the absolute directories containing this package's libraries."""
    return [join(_root, "lib")]


def get_library_dirs_rel():
    """Return the library directories relative to the package root."""
    return ['lib']


def get_library_names():
    """Return the base names of the shipped libraries."""
    return ['wpimath']


def get_library_full_names():
    """Return the full file names of the shipped libraries."""
    return ['wpimath.dll']
def get_type_casters_cfg(casters):
casters.update({'units::feet_per_second_squared_t': {'hdr': 'units_acceleration_type_caster.h', 'darg': True}, 'units::meters_per_second_squared_t': {'hdr': 'units_acceleration_type_caster.h', 'darg': True}, 'units::standard_gravity_t': {'hdr': 'units_acceleration_type_caster.h', 'darg': True}, 'units::feet_per_second_squared': {'hdr': 'units_acceleration_type_caster.h'}, 'units::meters_per_second_squared': {'hdr': 'units_acceleration_type_caster.h'}, 'units::standard_gravity': {'hdr': 'units_acceleration_type_caster.h'}, 'units::arcminute_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::arcsecond_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::degree_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::gradian_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::kiloradian_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::microradian_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::milliarcsecond_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::milliradian_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::nanoradian_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::radian_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::turn_t': {'hdr': 'units_angle_type_caster.h', 'darg': True}, 'units::arcminute': {'hdr': 'units_angle_type_caster.h'}, 'units::arcminutes': {'hdr': 'units_angle_type_caster.h'}, 'units::arcsecond': {'hdr': 'units_angle_type_caster.h'}, 'units::arcseconds': {'hdr': 'units_angle_type_caster.h'}, 'units::degree': {'hdr': 'units_angle_type_caster.h'}, 'units::degrees': {'hdr': 'units_angle_type_caster.h'}, 'units::gradian': {'hdr': 'units_angle_type_caster.h'}, 'units::gradians': {'hdr': 'units_angle_type_caster.h'}, 'units::kiloradian': {'hdr': 'units_angle_type_caster.h'}, 'units::kiloradians': {'hdr': 'units_angle_type_caster.h'}, 'units::microradian': {'hdr': 'units_angle_type_caster.h'}, 'units::microradians': 
{'hdr': 'units_angle_type_caster.h'}, 'units::milliarcsecond': {'hdr': 'units_angle_type_caster.h'}, 'units::milliarcseconds': {'hdr': 'units_angle_type_caster.h'}, 'units::milliradian': {'hdr': 'units_angle_type_caster.h'}, 'units::milliradians': {'hdr': 'units_angle_type_caster.h'}, 'units::nanoradian': {'hdr': 'units_angle_type_caster.h'}, 'units::nanoradians': {'hdr': 'units_angle_type_caster.h'}, 'units::radian': {'hdr': 'units_angle_type_caster.h'}, 'units::radians': {'hdr': 'units_angle_type_caster.h'}, 'units::turn': {'hdr': 'units_angle_type_caster.h'}, 'units::turns': {'hdr': 'units_angle_type_caster.h'}, 'units::radians_per_second_squared_t': {'hdr': 'units_angular_acceleration_type_caster.h', 'darg': True}, 'units::degrees_per_second_squared_t': {'hdr': 'units_angular_acceleration_type_caster.h', 'darg': True}, 'units::radians_per_second_squared': {'hdr': 'units_angular_acceleration_type_caster.h'}, 'units::degrees_per_second_squared': {'hdr': 'units_angular_acceleration_type_caster.h'}, 'units::degrees_per_second_t': {'hdr': 'units_angular_velocity_type_caster.h', 'darg': True}, 'units::milliarcseconds_per_year_t': {'hdr': 'units_angular_velocity_type_caster.h', 'darg': True}, 'units::radians_per_second_t': {'hdr': 'units_angular_velocity_type_caster.h', 'darg': True}, 'units::turns_per_second_t': {'hdr': 'units_angular_velocity_type_caster.h', 'darg': True}, 'units::revolutions_per_minute_t': {'hdr': 'units_angular_velocity_type_caster.h', 'darg': True}, 'units::degrees_per_second': {'hdr': 'units_angular_velocity_type_caster.h'}, 'units::milliarcseconds_per_year': {'hdr': 'units_angular_velocity_type_caster.h'}, 'units::radians_per_second': {'hdr': 'units_angular_velocity_type_caster.h'}, 'units::turns_per_second': {'hdr': 'units_angular_velocity_type_caster.h'}, 'units::revolutions_per_minute': {'hdr': 'units_angular_velocity_type_caster.h'}, 'units::acre_t': {'hdr': 'units_area_type_caster.h', 'darg': True}, 'units::hectare_t': {'hdr': 
'units_area_type_caster.h', 'darg': True}, 'units::square_foot_t': {'hdr': 'units_area_type_caster.h', 'darg': True}, 'units::square_inch_t': {'hdr': 'units_area_type_caster.h', 'darg': True}, 'units::square_kilometer_t': {'hdr': 'units_area_type_caster.h', 'darg': True}, 'units::square_meter_t': {'hdr': 'units_area_type_caster.h', 'darg': True}, 'units::square_mile_t': {'hdr': 'units_area_type_caster.h', 'darg': True}, 'units::acre': {'hdr': 'units_area_type_caster.h'}, 'units::acres': {'hdr': 'units_area_type_caster.h'}, 'units::hectare': {'hdr': 'units_area_type_caster.h'}, 'units::hectares': {'hdr': 'units_area_type_caster.h'}, 'units::square_feet': {'hdr': 'units_area_type_caster.h'}, 'units::square_foot': {'hdr': 'units_area_type_caster.h'}, 'units::square_inch': {'hdr': 'units_area_type_caster.h'}, 'units::square_inches': {'hdr': 'units_area_type_caster.h'}, 'units::square_kilometer': {'hdr': 'units_area_type_caster.h'}, 'units::square_kilometers': {'hdr': 'units_area_type_caster.h'}, 'units::square_meter': {'hdr': 'units_area_type_caster.h'}, 'units::square_meters': {'hdr': 'units_area_type_caster.h'}, 'units::square_mile': {'hdr': 'units_area_type_caster.h'}, 'units::square_miles': {'hdr': 'units_area_type_caster.h'}, 'units::farad_t': {'hdr': 'units_capacitance_type_caster.h', 'darg': True}, 'units::kilofarad_t': {'hdr': 'units_capacitance_type_caster.h', 'darg': True}, 'units::microfarad_t': {'hdr': 'units_capacitance_type_caster.h', 'darg': True}, 'units::millifarad_t': {'hdr': 'units_capacitance_type_caster.h', 'darg': True}, 'units::nanofarad_t': {'hdr': 'units_capacitance_type_caster.h', 'darg': True}, 'units::farad': {'hdr': 'units_capacitance_type_caster.h'}, 'units::farads': {'hdr': 'units_capacitance_type_caster.h'}, 'units::kilofarad': {'hdr': 'units_capacitance_type_caster.h'}, 'units::kilofarads': {'hdr': 'units_capacitance_type_caster.h'}, 'units::microfarad': {'hdr': 'units_capacitance_type_caster.h'}, 'units::microfarads': {'hdr': 
'units_capacitance_type_caster.h'}, 'units::millifarad': {'hdr': 'units_capacitance_type_caster.h'}, 'units::millifarads': {'hdr': 'units_capacitance_type_caster.h'}, 'units::nanofarad': {'hdr': 'units_capacitance_type_caster.h'}, 'units::nanofarads': {'hdr': 'units_capacitance_type_caster.h'}, 'units::ampere_hour_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::coulomb_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::kiloampere_hour_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::kilocoulomb_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::microampere_hour_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::microcoulomb_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::milliampere_hour_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::millicoulomb_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::nanoampere_hour_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::nanocoulomb_t': {'hdr': 'units_charge_type_caster.h', 'darg': True}, 'units::ampere_hour': {'hdr': 'units_charge_type_caster.h'}, 'units::ampere_hours': {'hdr': 'units_charge_type_caster.h'}, 'units::coulomb': {'hdr': 'units_charge_type_caster.h'}, 'units::coulombs': {'hdr': 'units_charge_type_caster.h'}, 'units::kiloampere_hour': {'hdr': 'units_charge_type_caster.h'}, 'units::kiloampere_hours': {'hdr': 'units_charge_type_caster.h'}, 'units::kilocoulomb': {'hdr': 'units_charge_type_caster.h'}, 'units::kilocoulombs': {'hdr': 'units_charge_type_caster.h'}, 'units::microampere_hour': {'hdr': 'units_charge_type_caster.h'}, 'units::microampere_hours': {'hdr': 'units_charge_type_caster.h'}, 'units::microcoulomb': {'hdr': 'units_charge_type_caster.h'}, 'units::microcoulombs': {'hdr': 'units_charge_type_caster.h'}, 'units::milliampere_hour': {'hdr': 'units_charge_type_caster.h'}, 'units::milliampere_hours': {'hdr': 'units_charge_type_caster.h'}, 'units::millicoulomb': {'hdr': 
'units_charge_type_caster.h'}, 'units::millicoulombs': {'hdr': 'units_charge_type_caster.h'}, 'units::nanoampere_hour': {'hdr': 'units_charge_type_caster.h'}, 'units::nanoampere_hours': {'hdr': 'units_charge_type_caster.h'}, 'units::nanocoulomb': {'hdr': 'units_charge_type_caster.h'}, 'units::nanocoulombs': {'hdr': 'units_charge_type_caster.h'}, 'units::percent_t': {'hdr': 'units_concentration_type_caster.h', 'darg': True}, 'units::ppb_t': {'hdr': 'units_concentration_type_caster.h', 'darg': True}, 'units::ppm_t': {'hdr': 'units_concentration_type_caster.h', 'darg': True}, 'units::ppt_t': {'hdr': 'units_concentration_type_caster.h', 'darg': True}, 'units::parts_per_billion': {'hdr': 'units_concentration_type_caster.h'}, 'units::parts_per_million': {'hdr': 'units_concentration_type_caster.h'}, 'units::parts_per_trillion': {'hdr': 'units_concentration_type_caster.h'}, 'units::percent': {'hdr': 'units_concentration_type_caster.h'}, 'units::ppb': {'hdr': 'units_concentration_type_caster.h'}, 'units::ppm': {'hdr': 'units_concentration_type_caster.h'}, 'units::ppt': {'hdr': 'units_concentration_type_caster.h'}, 'units::kilosiemens_t': {'hdr': 'units_conductance_type_caster.h', 'darg': True}, 'units::microsiemens_t': {'hdr': 'units_conductance_type_caster.h', 'darg': True}, 'units::millisiemens_t': {'hdr': 'units_conductance_type_caster.h', 'darg': True}, 'units::nanosiemens_t': {'hdr': 'units_conductance_type_caster.h', 'darg': True}, 'units::siemens_t': {'hdr': 'units_conductance_type_caster.h', 'darg': True}, 'units::kilosiemens': {'hdr': 'units_conductance_type_caster.h'}, 'units::microsiemens': {'hdr': 'units_conductance_type_caster.h'}, 'units::millisiemens': {'hdr': 'units_conductance_type_caster.h'}, 'units::nanosiemens': {'hdr': 'units_conductance_type_caster.h'}, 'units::siemens': {'hdr': 'units_conductance_type_caster.h'}, 'units::ampere_t': {'hdr': 'units_current_type_caster.h', 'darg': True}, 'units::kiloampere_t': {'hdr': 'units_current_type_caster.h', 
'darg': True}, 'units::microampere_t': {'hdr': 'units_current_type_caster.h', 'darg': True}, 'units::milliampere_t': {'hdr': 'units_current_type_caster.h', 'darg': True}, 'units::nanoampere_t': {'hdr': 'units_current_type_caster.h', 'darg': True}, 'units::ampere': {'hdr': 'units_current_type_caster.h'}, 'units::amperes': {'hdr': 'units_current_type_caster.h'}, 'units::kiloampere': {'hdr': 'units_current_type_caster.h'}, 'units::kiloamperes': {'hdr': 'units_current_type_caster.h'}, 'units::microampere': {'hdr': 'units_current_type_caster.h'}, 'units::microamperes': {'hdr': 'units_current_type_caster.h'}, 'units::milliampere': {'hdr': 'units_current_type_caster.h'}, 'units::milliamperes': {'hdr': 'units_current_type_caster.h'}, 'units::nanoampere': {'hdr': 'units_current_type_caster.h'}, 'units::nanoamperes': {'hdr': 'units_current_type_caster.h'}, 'units::exabit_t': {'hdr': 'units_data_type_caster.h', 'darg': True}, 'units::exabyte_t': {'hdr': 'units_data_type_caster.h', 'darg': True}, 'units::exabit': {'hdr': 'units_data_type_caster.h'}, 'units::exabits': {'hdr': 'units_data_type_caster.h'}, 'units::exabyte': {'hdr': 'units_data_type_caster.h'}, 'units::exabytes': {'hdr': 'units_data_type_caster.h'}, 'units::exabits_per_second_t': {'hdr': 'units_data_transfer_rate_type_caster.h', 'darg': True}, 'units::exabytes_per_second_t': {'hdr': 'units_data_transfer_rate_type_caster.h', 'darg': True}, 'units::exabits_per_second': {'hdr': 'units_data_transfer_rate_type_caster.h'}, 'units::exabytes_per_second': {'hdr': 'units_data_transfer_rate_type_caster.h'}, 'units::grams_per_milliliter_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::kilograms_per_cubic_meter_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::kilograms_per_liter_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::ounces_per_cubic_foot_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::ounces_per_cubic_inch_t': {'hdr': 'units_density_type_caster.h', 
'darg': True}, 'units::ounces_per_gallon_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::pounds_per_cubic_foot_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::pounds_per_cubic_inch_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::pounds_per_gallon_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::slugs_per_cubic_foot_t': {'hdr': 'units_density_type_caster.h', 'darg': True}, 'units::grams_per_milliliter': {'hdr': 'units_density_type_caster.h'}, 'units::kilograms_per_cubic_meter': {'hdr': 'units_density_type_caster.h'}, 'units::kilograms_per_liter': {'hdr': 'units_density_type_caster.h'}, 'units::ounces_per_cubic_foot': {'hdr': 'units_density_type_caster.h'}, 'units::ounces_per_cubic_inch': {'hdr': 'units_density_type_caster.h'}, 'units::ounces_per_gallon': {'hdr': 'units_density_type_caster.h'}, 'units::pounds_per_cubic_foot': {'hdr': 'units_density_type_caster.h'}, 'units::pounds_per_cubic_inch': {'hdr': 'units_density_type_caster.h'}, 'units::pounds_per_gallon': {'hdr': 'units_density_type_caster.h'}, 'units::slugs_per_cubic_foot': {'hdr': 'units_density_type_caster.h'}, 'units::british_thermal_unit_59_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::british_thermal_unit_iso_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::british_thermal_unit_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::calorie_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::foot_pound_t': {'hdr': 'units_torque_type_caster.h', 'darg': True}, 'units::joule_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::kilocalorie_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::kilojoule_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::kilowatt_hour_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::microcalorie_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::microjoule_t': {'hdr': 
'units_energy_type_caster.h', 'darg': True}, 'units::millicalorie_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::millijoule_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::nanocalorie_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::nanojoule_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::therm_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::watt_hour_t': {'hdr': 'units_energy_type_caster.h', 'darg': True}, 'units::british_thermal_unit': {'hdr': 'units_energy_type_caster.h'}, 'units::british_thermal_unit_59': {'hdr': 'units_energy_type_caster.h'}, 'units::british_thermal_unit_iso': {'hdr': 'units_energy_type_caster.h'}, 'units::british_thermal_units': {'hdr': 'units_energy_type_caster.h'}, 'units::british_thermal_units_59': {'hdr': 'units_energy_type_caster.h'}, 'units::british_thermal_units_iso': {'hdr': 'units_energy_type_caster.h'}, 'units::calorie': {'hdr': 'units_energy_type_caster.h'}, 'units::calories': {'hdr': 'units_energy_type_caster.h'}, 'units::foot_pound': {'hdr': 'units_torque_type_caster.h'}, 'units::foot_pounds': {'hdr': 'units_torque_type_caster.h'}, 'units::joule': {'hdr': 'units_energy_type_caster.h'}, 'units::joules': {'hdr': 'units_energy_type_caster.h'}, 'units::kilocalorie': {'hdr': 'units_energy_type_caster.h'}, 'units::kilocalories': {'hdr': 'units_energy_type_caster.h'}, 'units::kilojoule': {'hdr': 'units_energy_type_caster.h'}, 'units::kilojoules': {'hdr': 'units_energy_type_caster.h'}, 'units::kilowatt_hour': {'hdr': 'units_energy_type_caster.h'}, 'units::kilowatt_hours': {'hdr': 'units_energy_type_caster.h'}, 'units::microcalorie': {'hdr': 'units_energy_type_caster.h'}, 'units::microcalories': {'hdr': 'units_energy_type_caster.h'}, 'units::microjoule': {'hdr': 'units_energy_type_caster.h'}, 'units::microjoules': {'hdr': 'units_energy_type_caster.h'}, 'units::millicalorie': {'hdr': 'units_energy_type_caster.h'}, 'units::millicalories': {'hdr': 
'units_energy_type_caster.h'}, 'units::millijoule': {'hdr': 'units_energy_type_caster.h'}, 'units::millijoules': {'hdr': 'units_energy_type_caster.h'}, 'units::nanocalorie': {'hdr': 'units_energy_type_caster.h'}, 'units::nanocalories': {'hdr': 'units_energy_type_caster.h'}, 'units::nanojoule': {'hdr': 'units_energy_type_caster.h'}, 'units::nanojoules': {'hdr': 'units_energy_type_caster.h'}, 'units::therm': {'hdr': 'units_energy_type_caster.h'}, 'units::therms': {'hdr': 'units_energy_type_caster.h'}, 'units::watt_hour': {'hdr': 'units_energy_type_caster.h'}, 'units::watt_hours': {'hdr': 'units_energy_type_caster.h'}, 'units::dyne_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::kilonewton_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::kilopond_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::micronewton_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::millinewton_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::nanonewton_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::newton_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::pound_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::poundal_t': {'hdr': 'units_force_type_caster.h', 'darg': True}, 'units::dyne': {'hdr': 'units_force_type_caster.h'}, 'units::dynes': {'hdr': 'units_force_type_caster.h'}, 'units::kilonewton': {'hdr': 'units_force_type_caster.h'}, 'units::kilonewtons': {'hdr': 'units_force_type_caster.h'}, 'units::kilopond': {'hdr': 'units_force_type_caster.h'}, 'units::kiloponds': {'hdr': 'units_force_type_caster.h'}, 'units::micronewton': {'hdr': 'units_force_type_caster.h'}, 'units::micronewtons': {'hdr': 'units_force_type_caster.h'}, 'units::millinewton': {'hdr': 'units_force_type_caster.h'}, 'units::millinewtons': {'hdr': 'units_force_type_caster.h'}, 'units::nanonewton': {'hdr': 'units_force_type_caster.h'}, 'units::nanonewtons': {'hdr': 'units_force_type_caster.h'}, 'units::newton': 
{'hdr': 'units_force_type_caster.h'}, 'units::newtons': {'hdr': 'units_force_type_caster.h'}, 'units::pound': {'hdr': 'units_mass_type_caster.h'}, 'units::poundal': {'hdr': 'units_force_type_caster.h'}, 'units::poundals': {'hdr': 'units_force_type_caster.h'}, 'units::pounds': {'hdr': 'units_mass_type_caster.h'}, 'units::hertz_t': {'hdr': 'units_frequency_type_caster.h', 'darg': True}, 'units::kilohertz_t': {'hdr': 'units_frequency_type_caster.h', 'darg': True}, 'units::microhertz_t': {'hdr': 'units_frequency_type_caster.h', 'darg': True}, 'units::millihertz_t': {'hdr': 'units_frequency_type_caster.h', 'darg': True}, 'units::nanohertz_t': {'hdr': 'units_frequency_type_caster.h', 'darg': True}, 'units::hertz': {'hdr': 'units_frequency_type_caster.h'}, 'units::kilohertz': {'hdr': 'units_frequency_type_caster.h'}, 'units::microhertz': {'hdr': 'units_frequency_type_caster.h'}, 'units::millihertz': {'hdr': 'units_frequency_type_caster.h'}, 'units::nanohertz': {'hdr': 'units_frequency_type_caster.h'}, 'units::footcandle_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::kilolux_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::lumens_per_square_inch_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::lux_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::microlux_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::millilux_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::nanolux_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::phot_t': {'hdr': 'units_illuminance_type_caster.h', 'darg': True}, 'units::footcandle': {'hdr': 'units_illuminance_type_caster.h'}, 'units::footcandles': {'hdr': 'units_illuminance_type_caster.h'}, 'units::kilolux': {'hdr': 'units_illuminance_type_caster.h'}, 'units::kiloluxes': {'hdr': 'units_illuminance_type_caster.h'}, 'units::lumens_per_square_inch': {'hdr': 'units_illuminance_type_caster.h'}, 'units::lux': 
{'hdr': 'units_illuminance_type_caster.h'}, 'units::luxes': {'hdr': 'units_illuminance_type_caster.h'}, 'units::microlux': {'hdr': 'units_illuminance_type_caster.h'}, 'units::microluxes': {'hdr': 'units_illuminance_type_caster.h'}, 'units::millilux': {'hdr': 'units_illuminance_type_caster.h'}, 'units::milliluxes': {'hdr': 'units_illuminance_type_caster.h'}, 'units::nanolux': {'hdr': 'units_illuminance_type_caster.h'}, 'units::nanoluxes': {'hdr': 'units_illuminance_type_caster.h'}, 'units::phot': {'hdr': 'units_illuminance_type_caster.h'}, 'units::phots': {'hdr': 'units_illuminance_type_caster.h'}, 'units::kiloohm_t': {'hdr': 'units_impedance_type_caster.h', 'darg': True}, 'units::microohm_t': {'hdr': 'units_impedance_type_caster.h', 'darg': True}, 'units::milliohm_t': {'hdr': 'units_impedance_type_caster.h', 'darg': True}, 'units::nanoohm_t': {'hdr': 'units_impedance_type_caster.h', 'darg': True}, 'units::ohm_t': {'hdr': 'units_impedance_type_caster.h', 'darg': True}, 'units::kiloohm': {'hdr': 'units_impedance_type_caster.h'}, 'units::kiloohms': {'hdr': 'units_impedance_type_caster.h'}, 'units::microohm': {'hdr': 'units_impedance_type_caster.h'}, 'units::microohms': {'hdr': 'units_impedance_type_caster.h'}, 'units::milliohm': {'hdr': 'units_impedance_type_caster.h'}, 'units::milliohms': {'hdr': 'units_impedance_type_caster.h'}, 'units::nanoohm': {'hdr': 'units_impedance_type_caster.h'}, 'units::nanoohms': {'hdr': 'units_impedance_type_caster.h'}, 'units::ohm': {'hdr': 'units_impedance_type_caster.h'}, 'units::ohms': {'hdr': 'units_impedance_type_caster.h'}, 'units::henry_t': {'hdr': 'units_inductance_type_caster.h', 'darg': True}, 'units::kilohenry_t': {'hdr': 'units_inductance_type_caster.h', 'darg': True}, 'units::microhenry_t': {'hdr': 'units_inductance_type_caster.h', 'darg': True}, 'units::millihenry_t': {'hdr': 'units_inductance_type_caster.h', 'darg': True}, 'units::nanohenry_t': {'hdr': 'units_inductance_type_caster.h', 'darg': True}, 'units::henries': 
{'hdr': 'units_inductance_type_caster.h'}, 'units::henry': {'hdr': 'units_inductance_type_caster.h'}, 'units::kilohenries': {'hdr': 'units_inductance_type_caster.h'}, 'units::kilohenry': {'hdr': 'units_inductance_type_caster.h'}, 'units::microhenries': {'hdr': 'units_inductance_type_caster.h'}, 'units::microhenry': {'hdr': 'units_inductance_type_caster.h'}, 'units::millihenries': {'hdr': 'units_inductance_type_caster.h'}, 'units::millihenry': {'hdr': 'units_inductance_type_caster.h'}, 'units::nanohenries': {'hdr': 'units_inductance_type_caster.h'}, 'units::nanohenry': {'hdr': 'units_inductance_type_caster.h'}, 'units::angstrom_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::astronicalUnit_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::chain_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::cubit_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::fathom_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::foot_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::furlong_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::hand_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::inch_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::kilometer_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::league_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::lightyear_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::meter_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::micrometer_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::mil_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::mile_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::millimeter_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::nanometer_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::nauticalLeague_t': {'hdr': 'units_length_type_caster.h', 
'darg': True}, 'units::nauticalMile_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::parsec_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::yard_t': {'hdr': 'units_length_type_caster.h', 'darg': True}, 'units::angstrom': {'hdr': 'units_length_type_caster.h'}, 'units::angstroms': {'hdr': 'units_length_type_caster.h'}, 'units::astronicalUnit': {'hdr': 'units_length_type_caster.h'}, 'units::astronicalUnits': {'hdr': 'units_length_type_caster.h'}, 'units::chain': {'hdr': 'units_length_type_caster.h'}, 'units::chains': {'hdr': 'units_length_type_caster.h'}, 'units::cubit': {'hdr': 'units_length_type_caster.h'}, 'units::cubits': {'hdr': 'units_length_type_caster.h'}, 'units::fathom': {'hdr': 'units_length_type_caster.h'}, 'units::fathoms': {'hdr': 'units_length_type_caster.h'}, 'units::feet': {'hdr': 'units_length_type_caster.h'}, 'units::foot': {'hdr': 'units_length_type_caster.h'}, 'units::furlong': {'hdr': 'units_length_type_caster.h'}, 'units::furlongs': {'hdr': 'units_length_type_caster.h'}, 'units::hand': {'hdr': 'units_length_type_caster.h'}, 'units::hands': {'hdr': 'units_length_type_caster.h'}, 'units::inch': {'hdr': 'units_length_type_caster.h'}, 'units::inches': {'hdr': 'units_length_type_caster.h'}, 'units::kilometer': {'hdr': 'units_length_type_caster.h'}, 'units::kilometers': {'hdr': 'units_length_type_caster.h'}, 'units::league': {'hdr': 'units_length_type_caster.h'}, 'units::leagues': {'hdr': 'units_length_type_caster.h'}, 'units::lightyear': {'hdr': 'units_length_type_caster.h'}, 'units::lightyears': {'hdr': 'units_length_type_caster.h'}, 'units::meter': {'hdr': 'units_length_type_caster.h'}, 'units::meters': {'hdr': 'units_length_type_caster.h'}, 'units::micrometer': {'hdr': 'units_length_type_caster.h'}, 'units::micrometers': {'hdr': 'units_length_type_caster.h'}, 'units::mil': {'hdr': 'units_length_type_caster.h'}, 'units::mile': {'hdr': 'units_length_type_caster.h'}, 'units::miles': {'hdr': 
'units_length_type_caster.h'}, 'units::millimeter': {'hdr': 'units_length_type_caster.h'}, 'units::millimeters': {'hdr': 'units_length_type_caster.h'}, 'units::mils': {'hdr': 'units_length_type_caster.h'}, 'units::nanometer': {'hdr': 'units_length_type_caster.h'}, 'units::nanometers': {'hdr': 'units_length_type_caster.h'}, 'units::nauticalLeague': {'hdr': 'units_length_type_caster.h'}, 'units::nauticalLeagues': {'hdr': 'units_length_type_caster.h'}, 'units::nauticalMile': {'hdr': 'units_length_type_caster.h'}, 'units::nauticalMiles': {'hdr': 'units_length_type_caster.h'}, 'units::parsec': {'hdr': 'units_length_type_caster.h'}, 'units::parsecs': {'hdr': 'units_length_type_caster.h'}, 'units::yard': {'hdr': 'units_length_type_caster.h'}, 'units::yards': {'hdr': 'units_length_type_caster.h'}, 'units::kilolumen_t': {'hdr': 'units_luminous_flux_type_caster.h', 'darg': True}, 'units::lumen_t': {'hdr': 'units_luminous_flux_type_caster.h', 'darg': True}, 'units::microlumen_t': {'hdr': 'units_luminous_flux_type_caster.h', 'darg': True}, 'units::millilumen_t': {'hdr': 'units_luminous_flux_type_caster.h', 'darg': True}, 'units::nanolumen_t': {'hdr': 'units_luminous_flux_type_caster.h', 'darg': True}, 'units::kilolumen': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::kilolumens': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::lumen': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::lumens': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::microlumen': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::microlumens': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::millilumen': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::millilumens': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::nanolumen': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::nanolumens': {'hdr': 'units_luminous_flux_type_caster.h'}, 'units::candela_t': {'hdr': 'units_luminous_intensity_type_caster.h', 'darg': True}, 'units::kilocandela_t': {'hdr': 
'units_luminous_intensity_type_caster.h', 'darg': True}, 'units::microcandela_t': {'hdr': 'units_luminous_intensity_type_caster.h', 'darg': True}, 'units::millicandela_t': {'hdr': 'units_luminous_intensity_type_caster.h', 'darg': True}, 'units::nanocandela_t': {'hdr': 'units_luminous_intensity_type_caster.h', 'darg': True}, 'units::candela': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::candelas': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::kilocandela': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::kilocandelas': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::microcandela': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::microcandelas': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::millicandela': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::millicandelas': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::nanocandela': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::nanocandelas': {'hdr': 'units_luminous_intensity_type_caster.h'}, 'units::gauss_t': {'hdr': 'units_magnetic_field_strength_type_caster.h', 'darg': True}, 'units::kilotesla_t': {'hdr': 'units_magnetic_field_strength_type_caster.h', 'darg': True}, 'units::microtesla_t': {'hdr': 'units_magnetic_field_strength_type_caster.h', 'darg': True}, 'units::millitesla_t': {'hdr': 'units_magnetic_field_strength_type_caster.h', 'darg': True}, 'units::nanotesla_t': {'hdr': 'units_magnetic_field_strength_type_caster.h', 'darg': True}, 'units::tesla_t': {'hdr': 'units_magnetic_field_strength_type_caster.h', 'darg': True}, 'units::gauss': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::kilotesla': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::kiloteslas': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::microtesla': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::microteslas': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 
'units::millitesla': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::milliteslas': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::nanotesla': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::nanoteslas': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::tesla': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::teslas': {'hdr': 'units_magnetic_field_strength_type_caster.h'}, 'units::kiloweber_t': {'hdr': 'units_magnetic_flux_type_caster.h', 'darg': True}, 'units::maxwell_t': {'hdr': 'units_magnetic_flux_type_caster.h', 'darg': True}, 'units::microweber_t': {'hdr': 'units_magnetic_flux_type_caster.h', 'darg': True}, 'units::milliweber_t': {'hdr': 'units_magnetic_flux_type_caster.h', 'darg': True}, 'units::nanoweber_t': {'hdr': 'units_magnetic_flux_type_caster.h', 'darg': True}, 'units::weber_t': {'hdr': 'units_magnetic_flux_type_caster.h', 'darg': True}, 'units::kiloweber': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::kilowebers': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::maxwell': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::maxwells': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::microweber': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::microwebers': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::milliweber': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::milliwebers': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::nanoweber': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::nanowebers': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::weber': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::webers': {'hdr': 'units_magnetic_flux_type_caster.h'}, 'units::carat_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::gram_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::kilogram_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::long_ton_t': {'hdr': 'units_mass_type_caster.h', 
'darg': True}, 'units::metric_ton_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::microgram_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::milligram_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::nanogram_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::ounce_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::short_ton_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::slug_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::stone_t': {'hdr': 'units_mass_type_caster.h', 'darg': True}, 'units::carat': {'hdr': 'units_mass_type_caster.h'}, 'units::carats': {'hdr': 'units_mass_type_caster.h'}, 'units::gram': {'hdr': 'units_mass_type_caster.h'}, 'units::grams': {'hdr': 'units_mass_type_caster.h'}, 'units::kilogram': {'hdr': 'units_mass_type_caster.h'}, 'units::kilograms': {'hdr': 'units_mass_type_caster.h'}, 'units::long_ton': {'hdr': 'units_mass_type_caster.h'}, 'units::long_tons': {'hdr': 'units_mass_type_caster.h'}, 'units::metric_ton': {'hdr': 'units_mass_type_caster.h'}, 'units::metric_tons': {'hdr': 'units_mass_type_caster.h'}, 'units::microgram': {'hdr': 'units_mass_type_caster.h'}, 'units::micrograms': {'hdr': 'units_mass_type_caster.h'}, 'units::milligram': {'hdr': 'units_mass_type_caster.h'}, 'units::milligrams': {'hdr': 'units_mass_type_caster.h'}, 'units::nanogram': {'hdr': 'units_mass_type_caster.h'}, 'units::nanograms': {'hdr': 'units_mass_type_caster.h'}, 'units::ounce': {'hdr': 'units_mass_type_caster.h'}, 'units::ounces': {'hdr': 'units_mass_type_caster.h'}, 'units::short_ton': {'hdr': 'units_mass_type_caster.h'}, 'units::short_tons': {'hdr': 'units_mass_type_caster.h'}, 'units::slug': {'hdr': 'units_mass_type_caster.h'}, 'units::slugs': {'hdr': 'units_mass_type_caster.h'}, 'units::stone': {'hdr': 'units_mass_type_caster.h'}, 'units::kilogram_square_meter_t': {'hdr': 'units_moment_of_inertia_type_caster.h', 'darg': True}, 'units::kilogram_square_meter': 
{'hdr': 'units_moment_of_inertia_type_caster.h'}, 'units::kilogram_square_meters': {'hdr': 'units_moment_of_inertia_type_caster.h'}, 'units::horsepower_t': {'hdr': 'units_power_type_caster.h', 'darg': True}, 'units::kilowatt_t': {'hdr': 'units_power_type_caster.h', 'darg': True}, 'units::microwatt_t': {'hdr': 'units_power_type_caster.h', 'darg': True}, 'units::milliwatt_t': {'hdr': 'units_power_type_caster.h', 'darg': True}, 'units::nanowatt_t': {'hdr': 'units_power_type_caster.h', 'darg': True}, 'units::watt_t': {'hdr': 'units_power_type_caster.h', 'darg': True}, 'units::horsepower': {'hdr': 'units_power_type_caster.h'}, 'units::kilowatt': {'hdr': 'units_power_type_caster.h'}, 'units::kilowatts': {'hdr': 'units_power_type_caster.h'}, 'units::microwatt': {'hdr': 'units_power_type_caster.h'}, 'units::microwatts': {'hdr': 'units_power_type_caster.h'}, 'units::milliwatt': {'hdr': 'units_power_type_caster.h'}, 'units::milliwatts': {'hdr': 'units_power_type_caster.h'}, 'units::nanowatt': {'hdr': 'units_power_type_caster.h'}, 'units::nanowatts': {'hdr': 'units_power_type_caster.h'}, 'units::watt': {'hdr': 'units_power_type_caster.h'}, 'units::watts': {'hdr': 'units_power_type_caster.h'}, 'units::atmosphere_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::bar_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::kilopascal_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::mbar_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::micropascal_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::millipascal_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::nanopascal_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::pascal_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::pounds_per_square_inch_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::torr_t': {'hdr': 'units_pressure_type_caster.h', 'darg': True}, 'units::atmosphere': 
{'hdr': 'units_pressure_type_caster.h'}, 'units::atmospheres': {'hdr': 'units_pressure_type_caster.h'}, 'units::bar': {'hdr': 'units_pressure_type_caster.h'}, 'units::bars': {'hdr': 'units_pressure_type_caster.h'}, 'units::kilopascal': {'hdr': 'units_pressure_type_caster.h'}, 'units::kilopascals': {'hdr': 'units_pressure_type_caster.h'}, 'units::mbar': {'hdr': 'units_pressure_type_caster.h'}, 'units::mbars': {'hdr': 'units_pressure_type_caster.h'}, 'units::micropascal': {'hdr': 'units_pressure_type_caster.h'}, 'units::micropascals': {'hdr': 'units_pressure_type_caster.h'}, 'units::millipascal': {'hdr': 'units_pressure_type_caster.h'}, 'units::millipascals': {'hdr': 'units_pressure_type_caster.h'}, 'units::nanopascal': {'hdr': 'units_pressure_type_caster.h'}, 'units::nanopascals': {'hdr': 'units_pressure_type_caster.h'}, 'units::pascal': {'hdr': 'units_pressure_type_caster.h'}, 'units::pascals': {'hdr': 'units_pressure_type_caster.h'}, 'units::pounds_per_square_inch': {'hdr': 'units_pressure_type_caster.h'}, 'units::torr': {'hdr': 'units_pressure_type_caster.h'}, 'units::torrs': {'hdr': 'units_pressure_type_caster.h'}, 'units::becquerel_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::curie_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::gray_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::kilobecquerel_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::kilogray_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::kilosievert_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::microbecquerel_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::microgray_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::microsievert_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::millibecquerel_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::milligray_t': {'hdr': 'units_radiation_type_caster.h', 'darg': 
True}, 'units::millisievert_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::nanobecquerel_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::nanogray_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::nanosievert_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::rad_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::rutherford_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::sievert_t': {'hdr': 'units_radiation_type_caster.h', 'darg': True}, 'units::becquerel': {'hdr': 'units_radiation_type_caster.h'}, 'units::becquerels': {'hdr': 'units_radiation_type_caster.h'}, 'units::curie': {'hdr': 'units_radiation_type_caster.h'}, 'units::curies': {'hdr': 'units_radiation_type_caster.h'}, 'units::gray': {'hdr': 'units_radiation_type_caster.h'}, 'units::grays': {'hdr': 'units_radiation_type_caster.h'}, 'units::kilobecquerel': {'hdr': 'units_radiation_type_caster.h'}, 'units::kilobecquerels': {'hdr': 'units_radiation_type_caster.h'}, 'units::kilogray': {'hdr': 'units_radiation_type_caster.h'}, 'units::kilograys': {'hdr': 'units_radiation_type_caster.h'}, 'units::kilosievert': {'hdr': 'units_radiation_type_caster.h'}, 'units::kilosieverts': {'hdr': 'units_radiation_type_caster.h'}, 'units::microbecquerel': {'hdr': 'units_radiation_type_caster.h'}, 'units::microbecquerels': {'hdr': 'units_radiation_type_caster.h'}, 'units::microgray': {'hdr': 'units_radiation_type_caster.h'}, 'units::micrograys': {'hdr': 'units_radiation_type_caster.h'}, 'units::microsievert': {'hdr': 'units_radiation_type_caster.h'}, 'units::microsieverts': {'hdr': 'units_radiation_type_caster.h'}, 'units::millibecquerel': {'hdr': 'units_radiation_type_caster.h'}, 'units::millibecquerels': {'hdr': 'units_radiation_type_caster.h'}, 'units::milligray': {'hdr': 'units_radiation_type_caster.h'}, 'units::milligrays': {'hdr': 'units_radiation_type_caster.h'}, 'units::millisievert': {'hdr': 
'units_radiation_type_caster.h'}, 'units::millisieverts': {'hdr': 'units_radiation_type_caster.h'}, 'units::nanobecquerel': {'hdr': 'units_radiation_type_caster.h'}, 'units::nanobecquerels': {'hdr': 'units_radiation_type_caster.h'}, 'units::nanogray': {'hdr': 'units_radiation_type_caster.h'}, 'units::nanograys': {'hdr': 'units_radiation_type_caster.h'}, 'units::nanosievert': {'hdr': 'units_radiation_type_caster.h'}, 'units::nanosieverts': {'hdr': 'units_radiation_type_caster.h'}, 'units::rad': {'hdr': 'units_radiation_type_caster.h'}, 'units::rads': {'hdr': 'units_radiation_type_caster.h'}, 'units::rutherford': {'hdr': 'units_radiation_type_caster.h'}, 'units::rutherfords': {'hdr': 'units_radiation_type_caster.h'}, 'units::sievert': {'hdr': 'units_radiation_type_caster.h'}, 'units::sieverts': {'hdr': 'units_radiation_type_caster.h'}, 'units::degree_squared_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::kilosteradian_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::microsteradian_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::millisteradian_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::nanosteradian_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::spat_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::steradian_t': {'hdr': 'units_solid_angle_type_caster.h', 'darg': True}, 'units::degree_squared': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::degrees_squared': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::kilosteradian': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::kilosteradians': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::microsteradian': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::microsteradians': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::millisteradian': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::millisteradians': {'hdr': 'units_solid_angle_type_caster.h'}, 
'units::nanosteradian': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::nanosteradians': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::spat': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::spats': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::steradian': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::steradians': {'hdr': 'units_solid_angle_type_caster.h'}, 'units::mole_t': {'hdr': 'units_substance_type_caster.h', 'darg': True}, 'units::mole': {'hdr': 'units_substance_type_caster.h'}, 'units::moles': {'hdr': 'units_substance_type_caster.h'}, 'units::celsius_t': {'hdr': 'units_temperature_type_caster.h', 'darg': True}, 'units::fahrenheit_t': {'hdr': 'units_temperature_type_caster.h', 'darg': True}, 'units::kelvin_t': {'hdr': 'units_temperature_type_caster.h', 'darg': True}, 'units::rankine_t': {'hdr': 'units_temperature_type_caster.h', 'darg': True}, 'units::reaumur_t': {'hdr': 'units_temperature_type_caster.h', 'darg': True}, 'units::celsius': {'hdr': 'units_temperature_type_caster.h'}, 'units::fahrenheit': {'hdr': 'units_temperature_type_caster.h'}, 'units::kelvin': {'hdr': 'units_temperature_type_caster.h'}, 'units::rankine': {'hdr': 'units_temperature_type_caster.h'}, 'units::reaumur': {'hdr': 'units_temperature_type_caster.h'}, 'units::day_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::gregorian_year_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::hour_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::julian_year_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::kilosecond_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::microsecond_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::millisecond_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::minute_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::nanosecond_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::second_t': {'hdr': 'units_time_type_caster.h', 'darg': 
True}, 'units::week_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::year_t': {'hdr': 'units_time_type_caster.h', 'darg': True}, 'units::day': {'hdr': 'units_time_type_caster.h'}, 'units::days': {'hdr': 'units_time_type_caster.h'}, 'units::gregorian_year': {'hdr': 'units_time_type_caster.h'}, 'units::gregorian_years': {'hdr': 'units_time_type_caster.h'}, 'units::hour': {'hdr': 'units_time_type_caster.h'}, 'units::hours': {'hdr': 'units_time_type_caster.h'}, 'units::julian_year': {'hdr': 'units_time_type_caster.h'}, 'units::julian_years': {'hdr': 'units_time_type_caster.h'}, 'units::kilosecond': {'hdr': 'units_time_type_caster.h'}, 'units::kiloseconds': {'hdr': 'units_time_type_caster.h'}, 'units::microsecond': {'hdr': 'units_time_type_caster.h'}, 'units::microseconds': {'hdr': 'units_time_type_caster.h'}, 'units::millisecond': {'hdr': 'units_time_type_caster.h'}, 'units::milliseconds': {'hdr': 'units_time_type_caster.h'}, 'units::minute': {'hdr': 'units_time_type_caster.h'}, 'units::minutes': {'hdr': 'units_time_type_caster.h'}, 'units::nanosecond': {'hdr': 'units_time_type_caster.h'}, 'units::nanoseconds': {'hdr': 'units_time_type_caster.h'}, 'units::second': {'hdr': 'units_time_type_caster.h'}, 'units::seconds': {'hdr': 'units_time_type_caster.h'}, 'units::week': {'hdr': 'units_time_type_caster.h'}, 'units::weeks': {'hdr': 'units_time_type_caster.h'}, 'units::year': {'hdr': 'units_time_type_caster.h'}, 'units::years': {'hdr': 'units_time_type_caster.h'}, 'units::foot_poundal_t': {'hdr': 'units_torque_type_caster.h', 'darg': True}, 'units::inch_pound_t': {'hdr': 'units_torque_type_caster.h', 'darg': True}, 'units::meter_kilogram_t': {'hdr': 'units_torque_type_caster.h', 'darg': True}, 'units::newton_meter_t': {'hdr': 'units_torque_type_caster.h', 'darg': True}, 'units::foot_poundal': {'hdr': 'units_torque_type_caster.h'}, 'units::foot_poundals': {'hdr': 'units_torque_type_caster.h'}, 'units::inch_pound': {'hdr': 'units_torque_type_caster.h'}, 
'units::inch_pounds': {'hdr': 'units_torque_type_caster.h'}, 'units::meter_kilogram': {'hdr': 'units_torque_type_caster.h'}, 'units::meter_kilograms': {'hdr': 'units_torque_type_caster.h'}, 'units::newton_meter': {'hdr': 'units_torque_type_caster.h'}, 'units::newton_meters': {'hdr': 'units_torque_type_caster.h'}, 'units::feet_per_second_t': {'hdr': 'units_velocity_type_caster.h', 'darg': True}, 'units::kilometers_per_hour_t': {'hdr': 'units_velocity_type_caster.h', 'darg': True}, 'units::knot_t': {'hdr': 'units_velocity_type_caster.h', 'darg': True}, 'units::meters_per_second_t': {'hdr': 'units_velocity_type_caster.h', 'darg': True}, 'units::miles_per_hour_t': {'hdr': 'units_velocity_type_caster.h', 'darg': True}, 'units::feet_per_second': {'hdr': 'units_velocity_type_caster.h'}, 'units::kilometers_per_hour': {'hdr': 'units_velocity_type_caster.h'}, 'units::knot': {'hdr': 'units_velocity_type_caster.h'}, 'units::knots': {'hdr': 'units_velocity_type_caster.h'}, 'units::meters_per_second': {'hdr': 'units_velocity_type_caster.h'}, 'units::miles_per_hour': {'hdr': 'units_velocity_type_caster.h'}, 'units::abvolt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::kilovolt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::microvolt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::millivolt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::nanovolt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::statvolt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::volt_t': {'hdr': 'units_voltage_type_caster.h', 'darg': True}, 'units::abvolt': {'hdr': 'units_voltage_type_caster.h'}, 'units::abvolts': {'hdr': 'units_voltage_type_caster.h'}, 'units::kilovolt': {'hdr': 'units_voltage_type_caster.h'}, 'units::kilovolts': {'hdr': 'units_voltage_type_caster.h'}, 'units::microvolt': {'hdr': 'units_voltage_type_caster.h'}, 'units::microvolts': {'hdr': 'units_voltage_type_caster.h'}, 
'units::millivolt': {'hdr': 'units_voltage_type_caster.h'}, 'units::millivolts': {'hdr': 'units_voltage_type_caster.h'}, 'units::nanovolt': {'hdr': 'units_voltage_type_caster.h'}, 'units::nanovolts': {'hdr': 'units_voltage_type_caster.h'}, 'units::statvolt': {'hdr': 'units_voltage_type_caster.h'}, 'units::statvolts': {'hdr': 'units_voltage_type_caster.h'}, 'units::volt': {'hdr': 'units_voltage_type_caster.h'}, 'units::volts': {'hdr': 'units_voltage_type_caster.h'}, 'units::barrel_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::bushel_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cord_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_fathom_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_foot_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_inch_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_kilometer_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_meter_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_mile_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_millimeter_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cubic_yard_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::cup_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::dash_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::dram_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::drop_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::fifth_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::fluid_ounce_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::gallon_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::gill_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::kiloliter_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::liter_t': {'hdr': 
'units_volume_type_caster.h', 'darg': True}, 'units::microliter_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::milliliter_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::nanoliter_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::peck_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::pinch_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::pint_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::quart_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::sack_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::shot_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::strike_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::tablespoon_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::teaspoon_t': {'hdr': 'units_volume_type_caster.h', 'darg': True}, 'units::barrel': {'hdr': 'units_volume_type_caster.h'}, 'units::barrels': {'hdr': 'units_volume_type_caster.h'}, 'units::bushel': {'hdr': 'units_volume_type_caster.h'}, 'units::bushels': {'hdr': 'units_volume_type_caster.h'}, 'units::cord': {'hdr': 'units_volume_type_caster.h'}, 'units::cords': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_fathom': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_fathoms': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_feet': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_foot': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_inch': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_inches': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_kilometer': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_kilometers': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_meter': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_meters': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_mile': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_miles': {'hdr': 'units_volume_type_caster.h'}, 
'units::cubic_millimeter': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_millimeters': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_yard': {'hdr': 'units_volume_type_caster.h'}, 'units::cubic_yards': {'hdr': 'units_volume_type_caster.h'}, 'units::cup': {'hdr': 'units_volume_type_caster.h'}, 'units::cups': {'hdr': 'units_volume_type_caster.h'}, 'units::dash': {'hdr': 'units_volume_type_caster.h'}, 'units::dashes': {'hdr': 'units_volume_type_caster.h'}, 'units::dram': {'hdr': 'units_volume_type_caster.h'}, 'units::drams': {'hdr': 'units_volume_type_caster.h'}, 'units::drop': {'hdr': 'units_volume_type_caster.h'}, 'units::drops': {'hdr': 'units_volume_type_caster.h'}, 'units::fifth': {'hdr': 'units_volume_type_caster.h'}, 'units::fifths': {'hdr': 'units_volume_type_caster.h'}, 'units::fluid_ounce': {'hdr': 'units_volume_type_caster.h'}, 'units::fluid_ounces': {'hdr': 'units_volume_type_caster.h'}, 'units::gallon': {'hdr': 'units_volume_type_caster.h'}, 'units::gallons': {'hdr': 'units_volume_type_caster.h'}, 'units::gill': {'hdr': 'units_volume_type_caster.h'}, 'units::gills': {'hdr': 'units_volume_type_caster.h'}, 'units::kiloliter': {'hdr': 'units_volume_type_caster.h'}, 'units::kiloliters': {'hdr': 'units_volume_type_caster.h'}, 'units::liter': {'hdr': 'units_volume_type_caster.h'}, 'units::liters': {'hdr': 'units_volume_type_caster.h'}, 'units::microliter': {'hdr': 'units_volume_type_caster.h'}, 'units::microliters': {'hdr': 'units_volume_type_caster.h'}, 'units::milliliter': {'hdr': 'units_volume_type_caster.h'}, 'units::milliliters': {'hdr': 'units_volume_type_caster.h'}, 'units::nanoliter': {'hdr': 'units_volume_type_caster.h'}, 'units::nanoliters': {'hdr': 'units_volume_type_caster.h'}, 'units::peck': {'hdr': 'units_volume_type_caster.h'}, 'units::pecks': {'hdr': 'units_volume_type_caster.h'}, 'units::pinch': {'hdr': 'units_volume_type_caster.h'}, 'units::pinches': {'hdr': 'units_volume_type_caster.h'}, 'units::pint': {'hdr': 
'units_volume_type_caster.h'}, 'units::pints': {'hdr': 'units_volume_type_caster.h'}, 'units::quart': {'hdr': 'units_volume_type_caster.h'}, 'units::quarts': {'hdr': 'units_volume_type_caster.h'}, 'units::sack': {'hdr': 'units_volume_type_caster.h'}, 'units::sacks': {'hdr': 'units_volume_type_caster.h'}, 'units::shot': {'hdr': 'units_volume_type_caster.h'}, 'units::shots': {'hdr': 'units_volume_type_caster.h'}, 'units::strike': {'hdr': 'units_volume_type_caster.h'}, 'units::strikes': {'hdr': 'units_volume_type_caster.h'}, 'units::tablespoon': {'hdr': 'units_volume_type_caster.h'}, 'units::tablespoons': {'hdr': 'units_volume_type_caster.h'}, 'units::teaspoon': {'hdr': 'units_volume_type_caster.h'}, 'units::teaspoons': {'hdr': 'units_volume_type_caster.h'}, 'units::curvature_t': {'hdr': 'units_compound_type_caster.h', 'darg': True}, 'units::compound_unit': {'hdr': 'units_compound_type_caster.h'}, 'units::inverse': {'hdr': 'units_compound_type_caster.h'}, 'units::dimensionless_t': {'hdr': 'units_misc_type_caster.h', 'darg': True}, 'units::dimensionless::dimensionless_t': {'hdr': 'units_misc_type_caster.h', 'darg': True}, 'units::scalar_t': {'hdr': 'units_misc_type_caster.h', 'darg': True}, 'units::dimensionless::scalar_t': {'hdr': 'units_misc_type_caster.h', 'darg': True}, 'units::dimensionless': {'hdr': 'units_misc_type_caster.h'}, 'units::dimensionless::dimensionless': {'hdr': 'units_misc_type_caster.h'}, 'units::scalar': {'hdr': 'units_misc_type_caster.h'}, 'units::dimensionless::scalar': {'hdr': 'units_misc_type_caster.h'}, 'frc::Vectord': {'hdr': 'pybind11/eigen.h'}, 'frc::Matrixd': {'hdr': 'pybind11/eigen.h'}})
def get_type_casters(casters):
    """Populate ``casters`` with the header file for every configured type caster.

    :param casters: dict to fill, mapping C++ type name -> header file name
    """
    cfg = {}
    get_type_casters_cfg(cfg)
    # Keep only entries that declare a header; other keys (e.g. 'darg') are
    # configuration flags, not header mappings.
    casters.update({name: info["hdr"] for name, info in cfg.items() if "hdr" in info})
import numpy as np
import RobotRaconteur as RR
from typing import NamedTuple
class JointTrajectoryLimits(NamedTuple):
    """Per-joint kinematic limits used when planning joint trajectories."""
    # Maximum jerk per joint -- presumably; not used by the trapezoidal
    # generator in this module (TODO confirm)
    j_max: np.ndarray
    # Maximum acceleration per joint
    a_max: np.ndarray
    # Maximum velocity per joint
    v_max: np.ndarray
    # Minimum joint positions -- not used by the trapezoidal generator here
    x_min: np.ndarray
    # Maximum joint positions -- not used by the trapezoidal generator here
    x_max: np.ndarray
class JointTrajectoryPositionRequest(NamedTuple):
    """Request to move from the current joint state to a desired joint position."""
    # Current joint positions
    current_position: np.ndarray
    # Current joint velocities
    current_velocity: np.ndarray
    # Target joint positions
    desired_position: np.ndarray
    # Target joint velocities at the end of the move
    desired_velocity: np.ndarray
    # Optional per-joint velocity cap (may be None); scaled by speed_ratio
    # when planning
    max_velocity: np.ndarray
    # Requested completion time; must be None -- the trapezoidal generator
    # asserts that it is unset
    desired_time: float = None
    # Speed scaling factor applied to the limits (1. = full speed)
    speed_ratio: float = 1.
    # Time to splice into a currently executing trajectory -- not used by the
    # trapezoidal generator in this module (TODO confirm semantics)
    splice_time: float = None
class JointTrajectoryVelocityRequest(NamedTuple):
    """Request to move at a desired joint velocity for up to ``timeout`` seconds."""
    # Current joint positions
    current_position: np.ndarray
    # Current joint velocities
    current_velocity: np.ndarray
    # Target joint velocities; scaled by speed_ratio when planning
    desired_velocity: np.ndarray
    # Time window after which the joints decelerate back to rest
    timeout: float
    # Speed scaling factor applied to the limits
    speed_ratio: float
    # Requested completion time; unused by the trapezoidal generator
    desired_time: float = None
class JointTrajectoryPositionCommand(NamedTuple):
    """A sampled trajectory point: commanded joint positions and velocities."""
    # Commanded joint positions
    command_position: np.ndarray
    # Commanded joint velocities
    command_velocity: np.ndarray
class TrapezoidalJointTrajectoryGenerator:
    """Plans and evaluates trapezoidal velocity-profile joint trajectories.

    Holds at most one active trajectory plan; position or velocity requests
    replace the plan, and ``get_command`` samples it at a given time.
    """

    def __init__(self, joint_count, limits):
        """
        :param joint_count: number of robot joints
        :param limits: JointTrajectoryLimits used when planning trajectories
        """
        self._joint_count = joint_count
        self._limits = limits
        self._speed_ratio = 1.0
        self._t_des = 0.0
        self._exec = None

    @property
    def t_des(self):
        """Desired trajectory time (currently always 0.0)."""
        return self._t_des

    @property
    def speed_ratio(self):
        """Speed scaling applied to trajectories (default 1.0)."""
        return self._speed_ratio

    @property
    def t_final(self):
        """Total duration of the active trajectory, or 0.0 when no plan is set."""
        plan = self._exec
        return plan.t_final if plan is not None else 0.

    @property
    def is_valid(self):
        """True when a trajectory plan is currently active."""
        return self._exec is not None

    @property
    def target_position(self):
        """Final joint positions of the active trajectory plan."""
        return self._exec.xf

    @property
    def target_velocity(self):
        """Final joint velocities of the active trajectory plan."""
        return self._exec.v3

    def get_command(self, t):
        """Sample the active trajectory at time ``t``.

        :return: (True, JointTrajectoryPositionCommand) on success, else (False, None)
        """
        plan = self._exec
        if plan is None:
            return False, None
        ok, pos, vel = plan.calc_at_time(t)
        if not ok:
            return False, None
        return True, JointTrajectoryPositionCommand(
            command_position=pos,
            command_velocity=vel,
        )

    def update_desired_position(self, request):
        """Plan a new position trajectory; replaces any active plan."""
        assert request.desired_time is None, "desired time not supported"
        self._exec = TrapezoidalJointTrajectoryGeneratorCalc.initialize_pos_exec(
            self._joint_count, self._limits, request)
        return True

    def update_desired_velocity(self, request):
        """Plan a new velocity trajectory; replaces any active plan."""
        self._exec = TrapezoidalJointTrajectoryGeneratorCalc.initialize_vel_exec(
            self._joint_count, self._limits, request)
        return True
class TrapezoidalJointTrajectoryGeneratorCalc:
    """Solver routines that turn trajectory requests into executable
    three-phase (accelerate / cruise / decelerate) trapezoidal profiles.

    Per joint, a profile is parameterized by phase durations (t1, t2, t3),
    phase accelerations (a1, a3) and a cruise velocity (v1). The numbered
    "cases" below solve the profile under different constraints; per-joint
    results are then synchronized to shared phase durations (the max across
    joints) and the accelerations re-solved for those durations.
    """

    @classmethod
    def initialize_pos_exec(cls, joint_count, limits, request):
        """Plan a synchronized move to ``request.desired_position``.

        :param joint_count: number of joints
        :param limits: JointTrajectoryLimits providing a_max and v_max
        :param request: JointTrajectoryPositionRequest; ``desired_time`` must be None
        :return: a TrapezoidalJointTrajectoryGeneratorExec for the move
        """
        assert request.desired_time is None
        # Scale limits by the requested speed ratio
        a_max = np.copy(limits.a_max)
        v_max = np.copy(limits.v_max)
        if request.speed_ratio != 0.0:
            a_max *= request.speed_ratio
            v_max *= request.speed_ratio
            if request.max_velocity is not None:
                req_vel = request.max_velocity * request.speed_ratio
                assert np.all(req_vel <= v_max), "req_vel must be less than or equal to v_max"
                v_max = np.minimum(req_vel, v_max)
        else:
            # NOTE(review): unlike the branch above, this path compares
            # request.max_velocity without a None check -- a None max_velocity
            # with speed_ratio == 0 would raise here. Confirm intended.
            assert np.all(request.max_velocity <= v_max), "req_vel must be less than or equal to v_max"
            v_max = np.minimum(request.max_velocity, v_max)
        dx = request.desired_position - request.current_position
        # Per-joint profile parameters
        t1 = np.zeros((joint_count,))
        t2 = np.zeros((joint_count,))
        t3 = np.zeros((joint_count,))
        v1 = np.zeros((joint_count,))
        a1 = np.zeros((joint_count,))
        a3 = np.zeros((joint_count,))
        for i in range(joint_count):
            # Joint already at target and at rest: keep zero-length phases
            if dx[i] == 0 and request.desired_velocity[i] == 0 and request.current_velocity[i] == 0:
                continue
            # Case 2: full trapezoid cruising at +/- v_max
            case2_success, v1[i], a1[i], a3[i], t1[i], t2[i], t3[i] = \
                cls.solve_case2(
                    request.current_position[i], request.desired_position[i], request.current_velocity[i],
                    request.desired_velocity[i], v_max[i], a_max[i]
                )
            if not case2_success:
                # Case 3: triangular profile (no cruise phase)
                case3_success, a1[i], a3[i], t1[i], t3[i] = \
                    cls.solve_case3(
                        request.current_position[i], request.desired_position[i], request.current_velocity[i],
                        request.desired_velocity[i], a_max[i]
                    )
                t2[i] = 0
                v1[i] = 0
                assert case3_success, "Invalid trajectory request"
        # Synchronize all joints to the slowest joint's phase durations
        t1_4 = np.max(t1)
        t2_4 = np.max(t2)
        t3_4 = np.max(t3)
        a1_4 = np.zeros((joint_count,))
        a3_4 = np.zeros((joint_count,))
        v1_4 = np.zeros((joint_count,))
        for i in range(joint_count):
            # Case 4: re-solve accelerations/cruise velocity for shared durations
            v1_4[i], a1_4[i], a3_4[i] = cls.solve_case4(
                request.current_position[i], request.desired_position[i], request.current_velocity[i],
                request.desired_velocity[i], t1_4, t2_4, t3_4)
        return TrapezoidalJointTrajectoryGeneratorExec(
            joint_count = joint_count,
            t1 = t1_4,
            t2 = t2_4,
            t3 = t3_4,
            x1 = request.current_position,
            v1 = request.current_velocity,
            v2 = v1_4,
            v3 = request.desired_velocity,
            a1 = a1_4,
            a3 = a3_4,
            xf = request.desired_position
        )

    @staticmethod
    def pos_1(a, v, x, t):
        """Scalar position after time ``t`` under constant acceleration ``a``."""
        return (0.5 * a * pow(t, 2) if a != 0 else 0.0) + v*t + x

    @staticmethod
    def vel_1(a, v, t):
        """Scalar velocity after time ``t`` under constant acceleration ``a``."""
        return (a *t if a != 0 else 0.0) + v

    @classmethod
    def pos(cls, a1, a3, x0, v0, t1, t2, t3):
        """Chain the three phases and return the final (position, velocity)."""
        v1_p = cls.vel_1(a1, v0, t1)
        v3_p = cls.vel_1(a3, v1_p, t3)
        x1_p = cls.pos_1(a1, v0, x0, t1)
        x2_p = cls.pos_1(0, v1_p, x1_p, t2)
        x3_p = cls.pos_1(a3, v1_p, x2_p, t3)
        return x3_p, v3_p

    @classmethod
    def solve_case2_sub(cls, x0, xf, v0, v1, vf, a_max):
        """Try a trapezoid with cruise velocity ``v1``; valid only if the cruise
        time t2 comes out non-negative.

        :return: (valid, a1, a3, t1, t2, t3)
        """
        t1 = 0
        a1 = 0
        if v1 != v0:
            a1 = a_max * np.sign(v1 - v0)
            t1 = (v1-v0) / a1
        t3 = 0
        a3 = 0
        if vf != v1:
            a3 = a_max * np.sign(vf-v1)
            t3 = (vf - v1) / a3
        # Distance covered by the ramps alone; the cruise phase makes up the rest
        xf_1, vf_1 = cls.pos(a1, a3, x0, v0, t1, 0.0, t3)
        dx2 = xf - xf_1
        t2 = dx2/v1
        return t2 >= 0, a1, a3, t1, t2, t3

    @classmethod
    def solve_case2(cls, x0, xf, v0, vf, v_max, a_max):
        """Case 2: full trapezoid cruising at +v_max or, failing that, -v_max.

        :return: (success, cruise velocity, a1, a3, t1, t2, t3)
        """
        case2_res, a1, a3, t1, t2, t3 = cls.solve_case2_sub(x0, xf, v0, v_max, vf, a_max)
        if case2_res:
            return True, v_max, a1, a3, t1, t2, t3
        case2_res, a1, a3, t1, t2, t3 = cls.solve_case2_sub(x0, xf, v0, -v_max, vf, a_max)
        if case2_res:
            return True, -v_max, a1, a3, t1, t2, t3
        return False, None, None, None, None, None, None

    @staticmethod
    def solve_case3_sub1(x0, xf, v0, vf, a1):
        """Discriminant of the triangular-profile equations; non-negative when a
        real solution exists for first-phase acceleration ``a1``."""
        return a1 * (xf - x0) + 0.5 * pow(v0, 2.0) + 0.5 * pow(vf, 2.0)

    @classmethod
    def solve_case3(cls, x0, xf, v0, vf, a_max):
        """Case 3: triangular profile (accelerate then decelerate, no cruise).

        Tries accelerating first at +a_max, then at -a_max, taking the first
        root pair with positive phase durations.

        :return: (success, a1, a3, t1, t3)
        """
        sub1 = cls.solve_case3_sub1(x0, xf, v0, vf, a_max)
        if sub1 >= 0:
            a1 = a_max
            a3 = -a_max
            t1 = (-v0 + np.sqrt(sub1)) / a1
            t3 = (a1*t1 + v0-vf) / a1
            if t1 > 0 and t3 > 0:
                return True, a1, a3, t1, t3
            t1 = (-v0 - np.sqrt(sub1))/ a1
            # Fixed: was `a1 * (a1 * t1 + v0 - vf) / a1`, where the stray a1
            # factor cancels the division and leaves t3 scaled by a1. Now
            # consistent with the other three root computations
            # (t3 = (v1 - vf) / a1 with v1 = a1*t1 + v0).
            t3 = (a1 * t1 + v0 - vf) / a1
            if t1 > 0 and t3 > 0:
                return True, a1, a3, t1, t3
        sub1 = cls.solve_case3_sub1(x0, xf, v0, vf, -a_max)
        if sub1 >= 0:
            a1 = -a_max
            a3 = a_max
            t1 = (-v0 + np.sqrt(sub1)) / a1
            t3 = (a1 * t1 + v0 - vf) / a1
            if t1 > 0 and t3 > 0:
                return True, a1, a3, t1, t3
            t1 = (-v0 - np.sqrt(sub1)) / a1
            t3 = (a1 * t1 + v0 - vf) / a1
            if (t1 > 0 and t3 > 0):
                return True, a1, a3, t1, t3
        return False, None, None, None, None

    @classmethod
    def solve_case4(cls, x0, xf, v0, vf, t1, t2, t3):
        """Case 4: given fixed phase durations, solve the accelerations and
        cruise velocity that reach xf/vf from x0/v0.

        :return: (cruise velocity v1, a1, a3)
        """
        a1_den = t1 * (t1 + 2*t2 + t3)
        a1 = (-2 * t1 * v0 - 2 * t2 * v0 - t3 * v0 - t3 * vf - 2 * x0 + 2 * xf) / a1_den if a1_den !=0 else 0.0
        v1 = a1 * t1 + v0
        a3 = 0.0
        if t3 != 0:
            a3 = (-a1 * t1 - v0 + vf) / t3
        return v1, a1, a3

    @classmethod
    def initialize_vel_exec(cls, joint_count, limits, request):
        """Plan a synchronized velocity move: ramp to the requested velocity,
        hold it until ``request.timeout``, then decelerate to rest.

        :param joint_count: number of joints
        :param limits: JointTrajectoryLimits providing a_max and v_max
        :param request: JointTrajectoryVelocityRequest
        :return: a TrapezoidalJointTrajectoryGeneratorExec for the move
        """
        a_max = np.copy(limits.a_max)
        v_max = np.copy(limits.v_max)
        if request.speed_ratio != 0.0:
            a_max *= request.speed_ratio
            v_max *= request.speed_ratio
            req_vel = request.desired_velocity * request.speed_ratio
            assert np.all(req_vel <= v_max), "req_vel must be less than or equal to v_max"
            v_max = np.minimum(v_max, req_vel)
        else:
            # NOTE(review): JointTrajectoryVelocityRequest has no max_velocity
            # field, so this branch would raise AttributeError if reached with
            # speed_ratio == 0 -- confirm the intended request type.
            assert np.all(request.max_velocity <= v_max), "req_vel must be less than or equal to v_max"
            v_max = np.minimum(request.max_velocity, v_max)
        t1 = np.zeros((joint_count,))
        t2 = np.zeros((joint_count,))
        t3 = np.zeros((joint_count,))
        v1 = np.zeros((joint_count,))
        a1 = np.zeros((joint_count,))
        a3 = np.zeros((joint_count,))
        for i in range(joint_count):
            # Joint at rest with no velocity demand: keep zero-length phases
            if request.desired_velocity[i] == 0 and request.current_velocity[i] == 0:
                continue
            # Case 5: velocity profile clamped to the timeout window. The
            # leading success flag is ignored (solve_case5 always succeeds);
            # it was previously unpacked into a_max[i], which was never read
            # after this point, so behavior is unchanged.
            _, v1[i], a1[i], a3[i], t1[i], t2[i], t3[i] = cls.solve_case5(request.current_velocity[i],
                request.desired_velocity[i], 0, request.timeout, a_max[i])
        # Synchronize all joints to the slowest joint's phase durations
        t1_4 = np.max(t1)
        t2_4 = np.max(t2)
        t3_4 = np.max(t3)
        a1_4 = np.zeros((joint_count,))
        a3_4 = np.zeros((joint_count,))
        for i in range(joint_count):
            # Case 6: re-solve accelerations for the shared phase durations
            a1_4[i], a3_4[i] = cls.solve_case6(request.current_velocity[i], v1[i], 0, t1_4, t2_4, t3_4)
        ret = TrapezoidalJointTrajectoryGeneratorExec(
            joint_count = joint_count,
            t1 = t1_4,
            t2 = t2_4,
            t3 = t3_4,
            x1 = request.current_position,
            v1 = request.current_velocity,
            v2 = v1,
            v3 = np.zeros((joint_count,)),
            a1 = a1_4,
            a3 = a3_4,
            xf = None
        )
        return ret

    @staticmethod
    def solve_case5(v0, v1, vf, timeout, a_max):
        """Case 5: ramp from v0 toward v1, clamped so the ramp fits within
        ``timeout``, then plan the deceleration ramp to vf.

        :return: (success, cruise velocity, a1, a3, t1, t2, t3); success is always True
        """
        t1 = 0.0
        a1 = 0.0
        if (v1 != v0):
            a1 = a_max * np.sign(v1 - v0)
            t1 = (v1 - v0) / a1
            if (t1 > timeout):
                # Ramp does not fit: take the velocity reachable within timeout.
                # NOTE(review): this looks like it may intend v0 + a1 * timeout;
                # confirm against the controller's expected behavior.
                v1 = a1 * timeout
                t1 = timeout
        t3 = 0
        a3 = 0
        if (vf != v1):
            a3 = a_max * np.sign(vf - v1)
            t3 = (vf - v1) / a3
        t2 = timeout - t1
        v1_res = v1
        return True, v1_res, a1, a3, t1, t2, t3

    @staticmethod
    def solve_case6(v0, v1, vf, t1, t2, t3):
        """Case 6: accelerations that realize cruise velocity v1 and final
        velocity vf given fixed phase durations.

        :return: (a1, a3)
        """
        a1_den = t1
        a1 = (v1-v0) / a1_den if (a1_den != 0) else 0.0
        a3 = 0
        if (t3 != 0):
            a3 = (-a1 * t1 - v0 + vf) / t3
        return a1, a3
class TrapezoidalJointTrajectoryGeneratorExec:
    """Evaluates a planned three-phase trapezoidal joint trajectory.

    Phases: constant acceleration a1 for t1 seconds, cruise at velocity v2
    for t2 seconds, constant acceleration a3 for t3 seconds. Beyond
    t_final = t1 + t2 + t3 the trajectory extrapolates at the final
    velocity v3.
    """

    def __init__(self, joint_count, t1, t2, t3, x1, v1, v2, v3, a1, a3, xf):
        """Store the profile; phase-boundary positions (x2, x3, and xf when
        not supplied) are computed lazily on first evaluation."""
        self.joint_count = joint_count
        # Phase durations and total duration
        self.t1 = t1
        self.t2 = t2
        self.t3 = t3
        self.t_final = t1 + t2 + t3
        # Start position/velocity, cruise velocity, and final velocity
        self.x1 = x1
        self.v1 = v1
        self.v2 = v2
        self.v3 = v3
        # Accelerations of the first and last phase
        self.a1 = a1
        self.a3 = a3
        # Final position (may be None; filled in lazily)
        self.xf = xf
        # Lazily-computed positions at the phase boundaries
        self.x2 = None
        self.x3 = None

    @staticmethod
    def pos_1(a, v, x, t):
        """Scalar position after time ``t`` under constant acceleration ``a``."""
        accel_term = 0.5 * a * pow(t, 2) if a != 0 else 0.0
        return accel_term + v*t + x

    @staticmethod
    def vel_1(a, v, t):
        """Scalar velocity after time ``t`` under constant acceleration ``a``."""
        accel_term = a * t if a != 0 else 0.0
        return accel_term + v

    @classmethod
    def pos(cls, n, a, v, x, t):
        """Vector of joint positions at offset ``t``; ``a`` of None means zero
        acceleration for every joint."""
        if a is None:
            return np.array([cls.pos_1(0.0, v[i], x[i], t) for i in range(n)])
        return np.array([cls.pos_1(a[i], v[i], x[i], t) for i in range(n)])

    @classmethod
    def vel(cls, n, a, v, t):
        """Vector of joint velocities at offset ``t``; ``a`` of None means zero
        acceleration for every joint."""
        if a is None:
            return np.array([cls.vel_1(0.0, v[i], t) for i in range(n)])
        return np.array([cls.vel_1(a[i], v[i], t) for i in range(n)])

    def calc_at_time(self, t):
        """Evaluate the trajectory at time ``t``.

        :return: (valid, position, velocity); valid is False for t < 0
        """
        # Lazily compute phase-boundary positions and, if unknown, the final position
        if self.x2 is None:
            self.x2 = self.pos(self.joint_count, self.a1, self.v1, self.x1, self.t1)
        if self.x3 is None:
            self.x3 = self.pos(self.joint_count, None, self.v2, self.x2, self.t2)
        if self.xf is None:
            self.xf = self.pos(self.joint_count, self.a3, self.v2, self.x3, self.t3)
        if t < 0:
            return False, None, None
        n = self.joint_count
        if t < self.t1:
            # Acceleration phase
            return True, self.pos(n, self.a1, self.v1, self.x1, t), \
                self.vel(n, self.a1, self.v1, t)
        if t < self.t1 + self.t2:
            # Cruise phase
            dt = t - self.t1
            return True, self.pos(n, None, self.v2, self.x2, dt), \
                self.vel(n, None, self.v2, dt)
        if t < self.t_final:
            # Deceleration phase
            dt = t - self.t1 - self.t2
            return True, self.pos(n, self.a3, self.v2, self.x3, dt), \
                self.vel(n, self.a3, self.v2, dt)
        # Past the end: continue at the final velocity from the final position
        dt = t - self.t_final
        return True, self.pos(n, None, self.v3, self.xf, dt), \
            self.vel(n, None, self.v3, dt)
from enum import Enum
import traceback
from turtle import down
import RobotRaconteur as RR
RRN = RR.RobotRaconteurNode.s
from RobotRaconteurCompanion.Util.RobotUtil import RobotUtil
from RobotRaconteurCompanion.Util.DateTimeUtil import DateTimeUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
from RobotRaconteurCompanion.Util.SensorDataUtil import SensorDataUtil
import time
import threading
import numpy as np
from abc import ABC, abstractmethod
from .joint_trajectory_interpolator import JointTrajectoryInterpolator
from .trapezoidal_joint_trajectory_generator import JointTrajectoryLimits, JointTrajectoryPositionRequest, \
JointTrajectoryVelocityRequest, JointTrajectoryPositionCommand, TrapezoidalJointTrajectoryGenerator
class AbstractRobot(ABC):
"""
    Abstract base class for standard Robot Raconteur robot device drivers. Subclasses implement specific functionality
    for each robot controller type. Typically, these drivers communicate with the vendor controller. The vendor
controller may provide the communication method natively, or the vendor controller may need to execute
special programs provided by the driver.
The driver uses a ``RobotInfo`` structure to initialize information about kinematics etc. The __init__
function should also be overridden to initialize various instance variables. The ``robot_info`` parameter
is typically loaded from a YAML file.
AbstractRobot uses a real-time loop that periodically calls ``_run_timestep()``, with the period set by
``_update_period``. ``_run_timestep()`` does the following, some of which the subclass must implement:
#. Read feedback from driver (must be implemented by subclass).
Update ``_joint_position``, ``_joint_velocity`` (optional), ``_joint_effort`` (optional), ``_endpoint_pose``,
``_endpoint_vel`` (optional), ``_ready``, ``_enabled``, ``_stopped``, ``_error``, ``_estop_source``,
``_last_robot_state``, ``_last_joint_state``, and ``_last_endpoint_state``. These updates may happen
outside the loop, when the data is received from the robot. Hold ``_lock`` when updating data if not
inside the loop.
#. Verify communication by calling ``_verify_communication()``. If ``_last_robot_state``, ``_last_joint_state``,
or ``_last_endpoint_state`` exceed ``_communication_timeout`` relative to stopwatch time, set communication
failure.
    #. Verify the current robot state by calling ``_verify_robot_state()``
#. Fill a joint position or joint velocity command by calling ``_fill_robot_command()``. This will check the
current operational mode and commands from the client to generate the next command.
#. Fill the robot state structures to return to clients. Calls ``_fill_states()``, ``_fill_state_flags()``,
``_calc_endpoint_poses()``, and ``_calc_endpoint_vels()``
#. If a valid command is available, send to the robot using ``_send_robot_command()``. Subclass must implement
this function.
At a minimum, a driver subclass must fill feedback data from the robot as shown in step 1 above, and must
implement ``_send_robot_command()``, ``_send_disable()``, ``_send_enable()``, and ``_send_reset_errors()``.
See the example minimal ABB robot driver. Also see abb_robotraconteur_driver_hmp for a more sophisticated driver.
:ivar _robot_info: The ``RobotInfo`` structure, initialized from __init__ parameter
:ivar _joint_names: The names of the robot joints. Initialized from ``robot_info`` or ``default_joint_count``
:ivar _joint_count: The number of robot joints. Initialized from ``robot_info`` or ``default_joint_count``
:ivar _robot_uuid: The UUID of the robot. Initialized from the ``robot_info`` structure
:ivar _robot_caps: The capability flags of the robot taken from ``RobotCapabilities`` enum. By default initialized
from ``robot_info`, but it is recommended the driver override this value in __init__
:ivar _robot_util: Companion ``RobotUtil`` utility class instance
:ivar _datetime_util: Companion ``DateTimeUtil`` utility class instance
:ivar _geometry_util: Companion ``GeometryUtil`` utility class instance
:ivar _sensor_data_util: Companion ``SensorDataUtil`` utility class instance
:ivar _pose_dtype: ``com.robotraconteur.geometry.Pose`` numpy dtype
:ivar _spatial_velocity_dtype: ``com.robotraconteur.geometry.SpatialVelocity`` numpy dtype
:ivar _robot_state_type: ``RobotState`` structure type
:ivar _advanced_robot_state_type: ``AdvancedRobotState`` structure type
:ivar _robot_state_sensor_data_type: ``RobotStateSensorData`` structure type
:ivar _robot_joint_command_type: ``RobotJointCommand`` structure type
:ivar _isoch_info_type: ``IsochInfo`` structure type
:ivar _robot_consts: Constants from ``com.robotraconteur.robotics.robot``
:ivar _robot_capabilities: ``RobotCapabilities`` enum
:ivar _robot_command_mode: ``RobotCommandMode`` enum
:ivar _robot_operational_mode: ``RobotOperationalMode`` enum
:ivar _robot_controller_state: ``RobotControllerState`` enum
:ivar _robot_state_flags: ``RobotStateFlags`` enum
:ivar _joint_consts: Constants from ``com.robotraconteur.robotics.joints``
:ivar _joint_position_units: ``JointPositionUnits`` enum
:ivar _joint_effort_units: ``JointEffortUnits`` enum
:ivar _uses_homing: Robot uses homing command. Initialized from capabilities flags in ``robot_info``.
Recommended to override in __init__
:ivar _has_position_command: Robot has streaming position command. Initialized from capabilities flags in
``robot_info``. Recommended to override in __init__
:ivar _has_velocity_command: Robot has streaming velocity command. Initialized from capabilities flags in
``robot_info``. Recommended to override in __init__
:ivar _has_jog_command: Robot has jog command. Initialized from capabilities flags in
``robot_info``. Recommended to override in __init__
:ivar _current_tool: Currently attached robot tool. Array, one entry per chain. Initialized from ``robot_info``,
updated using ``tool_attached()`` and ``tool_detached()``
:ivar _current_payload: Currently attached payload. Array, one entry per chain. Initialized from ``robot_info``,
updated using ``payload_attached()`` and ``payload_detached()``
:ivar _current_payload_pose: Pose of currently attached payload relative to tool TCP. Array, one entry per chain.
Initialized from ``robot_info``, updated using ``payload_attached()``
and ``payload_detached()``
:ivar _keep_going: Boolean flag to stop loop
:ivar _update_period: The update period of the loop (aka timestep). Should be set in __init__
:ivar _speed_ratio: The current speed ratio. Set using ``speed_ratio`` property
:ivar _jog_joint_limit: The maximum joint distance allowed during a jog command
:ivar _trajectory_error_tol: The maximum error allowed between command and robot position during trajectory
execution
:ivar _command_mode: The current command mode. Set using ``command_mode`` property, and updated during operation
due to errors or other events.
:ivar _operational_mode: The operational mode of the vendor robot controller, using values from
``RobotOperationalMode`` enum. Should be
updated every timestep if available. Set ``_base_set_operational_mode`` to False
if used.
:ivar _controller_state: The controller state of the vendor robot controller, using values from
``RobotOperationalMode`` enum. Should be
updated every timestep if available. Set ``_base_set_controller_state`` to False
if used.
:ivar _joint_position: Current joint position based on feedback in radians (or meters). This value should be
updated every timestep using robot feedback.
:ivar _joint_velocity: Current joint velocity based on feedback in radians/s (or meters/s). This value should be
updated every timestep using robot feedback. Leave as empty array if velocity feedback
not available.
:ivar _joint_effort: Current joint effort based on feedback in Nm (or N). This value should be
updated every timestep using robot feedback. Leave as empty array if effort feedback
not available.
:ivar _position_command: Current position command. Set by the subclass after issuing command to robot. This
value is used for client state information.
:ivar _velocity_command: Current velocity command. Set by the subclass after issuing command to robot. This
value is used for client state information.
:ivar _endpoint_pose: Array of endpoint poses, one entry per chain. Update every timestep. Units should be in
meters, quaternions, relative to world or base of robot.
:ivar _endpoint_vel: Array of endpoint velocities, one entry per chain. Update every timestep. Units should be in
meters/s, radians/s, relative to world or base of robot.
:ivar _last_robot_state: The stopwatch time in seconds of the last state update received from the robot.
Must be updated to avoid communication timeout.
:ivar _last_joint_state: The stopwatch time in seconds of the last joint position update received from the robot.
Must be updated to avoid communication timeout.
:ivar _last_endpoint_state: The stopwatch time in seconds of the last endpoint update received from the robot.
Must be updated to avoid communication timeout.
:ivar _state_seqno: Counter of number of loop iterations executed (sequence number)
:ivar _homed: Set to True if robot is homed. Only valid if robot has homing capability
:ivar _ready: Set to True if robot is ready to move. Should be updated every timestep
:ivar _enabled: Set to True if robot is enabled with motors on. Should be updated every timestep. Robot may
be enabled but not ready
:ivar _stopped: Set to True if robot is stopped due to an estop. Should be updated every timestep
:ivar _error: Set to True if robot is in an error state. Should be updated every timestep. Errors are reset by
                  switching to halt mode, calling ``reset_errors()``, and/or clearing the error on the vendor
controller, in escalating levels of severity.
:ivar _estop_source: The source of the estop, using values from ``RobotStateFlags``
:ivar _communication_failure: Set by ``_verify_communication`` based on ``_communication_timeout``
:ivar _communication_timeout: Communication timeout in seconds. If no updates are received from the controller
within the communication timeout, an error condition is set
:ivar _broadcast_downsampler: Broadcast downsampler used by all wires and pipes to control data rate sent to client
:ivar position_command: Wire populated by Robot Raconteur to receive streaming position commands. Only used
in ``position_command`` command mode
    :ivar velocity_command: Wire populated by Robot Raconteur to receive streaming velocity commands. Only used
in ``velocity_command`` command mode
:ivar _wires_ready: Set to True when wires and pipes have been initialized by Robot Raconteur
:ivar _config_seqno: The sequence number returned as part of ``RobotInfo``. Incremented as tools and payloads
are attached/detached.
:ivar _base_set_operational_mode: If True, abstract robot will set ``_operational_mode`` to a default value.
Set to False if driver will update ``_operational_mode``
:ivar _base_set_controller_state: If True, abstract robot will set ``_controller_state`` to a default value.
Set to False if driver will update ``_controller_state``
:ivar _lock: Lock to hold when updating data to prevent race conditions
:param robot_info: The ``RobotInfo`` structure for the robot
:param default_joint_count: The default number of joints for the robot
:param node: The Robot Raconteur node for the driver
"""
    def __init__(self, robot_info, default_joint_count, node = None ):
        # See the class docstring for parameter and instance variable documentation.
        super().__init__()
        # Use the default Robot Raconteur node unless the caller supplies one
        if node is None:
            self._node = RRN
        else:
            self._node = node
        self._robot_info = robot_info
        # Joint names come from the RobotInfo structure when available, otherwise
        # generate "joint_0".."joint_{n-1}" from default_joint_count
        if robot_info.joint_info is not None:
            j_names = []
            for j_info in robot_info.joint_info:
                j_names.append(j_info.joint_identifier.name)
            self._joint_names = j_names
        else:
            assert default_joint_count > 0, "Joints must be specified in RobotInfo structure"
            self._joint_names = [f"joint_{x}" for x in range(default_joint_count)]
        self._joint_count = len(self._joint_names)
        self._robot_uuid = robot_info.device_info.device.uuid
        self._robot_caps = robot_info.robot_capabilities
        # Companion utility helpers bound to this node
        self._robot_util = RobotUtil(self._node)
        self._datetime_util = DateTimeUtil(self._node)
        self._geometry_util = GeometryUtil(self._node)
        self._sensor_data_util = SensorDataUtil(self._node)
        # Cache Robot Raconteur types used when filling state structures
        self._pose_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose")
        self._spatial_velocity_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.SpatialVelocity")
        self._robot_state_type = self._node.GetStructureType("com.robotraconteur.robotics.robot.RobotState")
        self._advanced_robot_state_type = self._node.GetStructureType("com.robotraconteur.robotics.robot.AdvancedRobotState")
        self._robot_state_sensor_data_type = self._node.GetStructureType("com.robotraconteur.robotics.robot.RobotStateSensorData")
        self._robot_joint_command_type = self._node.GetStructureType("com.robotraconteur.robotics.robot.RobotJointCommand")
        self._isoch_info_type = self._node.GetStructureType("com.robotraconteur.device.isoch.IsochInfo")
        # Cache constants/enums from the standard robotics service types
        self._robot_consts = self._node.GetConstants("com.robotraconteur.robotics.robot")
        self._robot_capabilities = self._robot_consts["RobotCapabilities"]
        self._robot_command_mode = self._robot_consts["RobotCommandMode"]
        self._robot_operational_mode = self._robot_consts["RobotOperationalMode"]
        self._robot_controller_state = self._robot_consts["RobotControllerState"]
        self._robot_state_flags = self._robot_consts["RobotStateFlags"]
        self._joint_consts = self._node.GetConstants("com.robotraconteur.robotics.joints")
        self._joint_position_units = self._joint_consts["JointPositionUnits"]
        self._joint_effort_units = self._joint_consts["JointEffortUnits"]
        # Decode capability flags (subclasses may override these in __init__)
        self._uses_homing = (self._robot_caps & self._robot_capabilities["homing_command"]) != 0
        self._has_position_command = (self._robot_caps & self._robot_capabilities["position_command"]) != 0
        self._has_velocity_command = (self._robot_caps & self._robot_capabilities["velocity_command"]) != 0
        self._has_jog_command = (self._robot_caps & self._robot_capabilities["jog_command"]) != 0
        # Build one GeneralRoboticsToolbox robot model per kinematic chain
        try:
            self._rox_robots = []
            for chain_i in range(len(self._robot_info.chains)):
                self._rox_robots.append(self._robot_util.robot_info_to_rox_robot(self._robot_info,chain_i))
        except:
            traceback.print_exc()
            raise ValueError("invalid robot_info, could not populate GeneralRoboticsToolbox.Robot")
        # Tool/payload bookkeeping, one entry per chain, seeded from robot_info
        self._current_tool = [None]*len(self._robot_info.chains)
        self._current_payload = [None]*len(self._robot_info.chains)
        self._current_payload_pose = [None]*len(self._robot_info.chains)
        for i in range(len(self._robot_info.chains)):
            if self._robot_info.chains[i].current_tool is not None:
                self._current_tool[i] = self._robot_info.chains[i].current_tool
            if self._robot_info.chains[i].current_payload is not None:
                self._current_payload[i] = self._robot_info.chains[i].current_payload
        # Validate joint limits; default the "reduced" limits to the full limits
        for i in range(self._joint_count):
            limits = robot_info.joint_info[i].joint_limits
            assert limits.velocity > 0, f"Invalid joint velocity for joint {i}"
            if limits.reduced_velocity <= 0:
                limits.reduced_velocity = limits.velocity
            assert limits.acceleration > 0, f"Invalid joint acceleration for joint {i}"
            if limits.reduced_acceleration <= 0:
                limits.reduced_acceleration = limits.acceleration
        # Loop/thread state
        self._keep_going = False
        self._stopwatch_epoch = None
        self._stopwatch_start = None
        self._loop_thread = None
        self._update_period = 0.01
        self._wait_event = threading.Event()
        # Stopwatch times of last feedback received from the robot
        self._last_robot_state = 0
        self._last_joint_state = 0
        self._last_endpoint_state = 0
        self._state_seqno = 0
        # Motion parameters and command mode defaults
        self._speed_ratio = 1.0
        self._jog_joint_limit = np.deg2rad(1000.)
        self._trajectory_error_tol = np.deg2rad(5.)
        self._command_mode = self._robot_command_mode["halt"]
        self._operational_mode = self._robot_operational_mode["manual_reduced_speed"]
        self._controller_state = self._robot_operational_mode["undefined"]
        # Feedback caches, filled by the subclass from robot feedback
        self._joint_position = np.zeros((0,))
        self._joint_velocity = np.zeros((0,))
        self._joint_effort = np.zeros((0,))
        self._position_command = None
        self._velocity_command = None
        self._endpoint_pose = []
        self._endpoint_vel = []
        # Robot status flags; start in communication-failure state until feedback arrives
        self._homed = False
        self._ready = False
        self._enabled = False
        self._stopped = False
        self._error = False
        self._estop_source = 0
        self._communication_failure = True
        self._communication_timeout = 0.25
        self._broadcast_downsampler = None
        # Streaming wire command tracking
        self._wire_position_command_sent = False
        self._wire_velocity_command_sent = False
        self._wire_position_command_last_seqno = 0
        self._wire_velocity_command_last_seqno = 0
        self._wire_position_command_last_ep = 0
        self._wire_velocity_command_last_ep = 0
        # Trajectory execution state
        self._trajectory_valid = False
        self._trajectory_current_time = 0
        self._trajectory_max_time = 0
        self._trajectory_waypoint = 0
        self._lock = threading.Lock()
        self._wires_ready = False
        self._active_trajectory = None
        self._queued_trajectories = []
        # Jog command state
        self._jog_start_time = 0.
        self._jog_trajectory_generator = None
        self._jog_completion_handler = None
        self._config_seqno = 1
        # When True, AbstractRobot sets default operational mode / controller state;
        # drivers that report these themselves should set these to False
        self._base_set_operational_mode = True
        self._base_set_controller_state = True
    def RRServiceObjectInit(self, context, service_path):
        """
        Called by Robot Raconteur when the service object is initialized. Configures the
        broadcast downsampler on the state wires/pipes and marks the wires as ready.

        :param context: the Robot Raconteur server context
        :param service_path: the service path of this object
        """
        # Bound backlog so slow clients do not accumulate unbounded sensor data packets
        self.robot_state_sensor_data.MaxBacklog = 3
        self._broadcast_downsampler = RR.BroadcastDownsampler(context, 0)
        self._broadcast_downsampler.AddPipeBroadcaster(self.robot_state_sensor_data)
        self._broadcast_downsampler.AddWireBroadcaster(self.robot_state)
        self._broadcast_downsampler.AddWireBroadcaster(self.advanced_robot_state)
        self._broadcast_downsampler.AddWireBroadcaster(self.device_clock_now)
        # Loop may now broadcast state to clients
        self._wires_ready = True
def _perf_counter(self) -> float:
"""
System performance counter in seconds. This counter is not relative to real time clock.
:return: Performance counter time in seconds
"""
return time.perf_counter()
def _stopwatch_ellapsed_s(self) -> float:
"""
Stopwatch time in seconds. Relative to start of driver loop.
:return: Stopwatch time in seconds
"""
return self._perf_counter() - self._stopwatch_start
def _start_robot(self):
"""
Start the robot driver loop
"""
self._stopwatch_epoch = self._datetime_util.TimeSpec2Now()
self._stopwatch_start = self._perf_counter()
self._keep_going = True
self._loop_thread = threading.Thread(target = self._loop_thread_func)
self._loop_thread.daemon = True
self._loop_thread.start()
    def _stop_robot(self):
        """
        Stop the robot driver loop and wait for the loop thread to exit.
        """
        # Signal the loop to exit; _loop_thread_func checks _keep_going each iteration
        self._keep_going = False
        self._loop_thread.join()
    def _loop_thread_func(self):
        """
        Loop thread entry function. This function runs the loop, and calls ``_run_timestep()``
        periodically at the ``_update_period`` specified in seconds.
        """
        next_wait = self._stopwatch_ellapsed_s()
        now = next_wait
        while self._keep_going:
            now = self._stopwatch_ellapsed_s()
            self._run_timestep(now)
            # Advance the deadline by whole periods until it is in the future,
            # skipping any periods missed by a slow timestep (no catch-up bursts)
            while True:
                next_wait += self._update_period
                if next_wait > now:
                    break
            # Sleep until the deadline, re-checking the clock after each sleep
            while True:
                now = self._stopwatch_ellapsed_s()
                if now >= next_wait:
                    break
                time.sleep(next_wait-now)
def _close(self):
"""
Close the driver, stop the loop
"""
self._keep_going = False
try:
self._loop_thread.join(timeout=1)
except:
pass
    def _run_timestep(self, now):
        """
        Called by the loop each timestep at the ``_update_period`` period in seconds.
        Verifies communication and robot state, fills the next robot command, fills
        client state structures, sends the command (if valid), and broadcasts state.

        :param now: stopwatch time in seconds
        """
        res = False
        joint_pos_cmd = None
        joint_vel_cmd = None
        rr_robot_state = None
        rr_advanced_robot_state = None
        rr_state_sensor_data = None
        downsampler_step = None
        with self._lock:
            if self._wires_ready:
                # Step the downsampler once per timestep so per-client rates are honored
                downsampler_step = RR.BroadcastDownsamplerStep(self._broadcast_downsampler)
            self._state_seqno += 1
            # Command is only sent if communication, robot state, and command fill all succeed
            res = self._verify_communication(now)
            res = res and self._verify_robot_state(now)
            res_fill, joint_pos_cmd, joint_vel_cmd = self._fill_robot_command(now)
            res = res and res_fill
            # Fill state structures under the lock for a consistent snapshot
            rr_robot_state, rr_advanced_robot_state, rr_state_sensor_data = self._fill_states(now)
            if res:
                self._send_robot_command(now, joint_pos_cmd, joint_vel_cmd)
        if downsampler_step:
            with downsampler_step:
                self._send_states(now, rr_robot_state, rr_advanced_robot_state, rr_state_sensor_data)
def _fill_state_flags(self, now):
"""
Fill ``_robot_state_flags`` based on current state of driver. Called by the loop each timestep to update
driver state
:param now: stopwatch time in seconds
"""
f = 0
if self._communication_failure:
f |= self._robot_state_flags["communication_failure"]
return f
if self._error:
f |= self._robot_state_flags["error"]
if self._stopped:
f |= self._robot_state_flags["estop"]
if self._estop_source == 0:
pass
elif self._estop_source == 1:
f |= self._robot_state_flags["estop_button1"]
elif self._estop_source == 2:
f |= self._robot_state_flags["estop_other"]
elif self._estop_source == 3:
f |= self._robot_state_flags["estop_fault"]
elif self._estop_source == 4:
f |= self._robot_state_flags["estop_internal"]
if self._enabled:
f |= self._robot_state_flags["enabled"]
if self._ready:
f |= self._robot_state_flags["ready"]
if self._uses_homing:
if self._homed:
f |= self._robot_state_flags["homed"]
else:
f |= self._robot_state_flags["homing_required"]
if self._wire_position_command_sent:
f |= self._robot_state_flags["valid_position_command"]
if self._wire_velocity_command_sent:
f |= self._robot_state_flags["valid_velocity_command"]
if self._trajectory_valid:
f |= self._robot_state_flags["trajectory_running"]
return f
    def _calc_endpoint_pose(self, chain):
        """
        Compute endpoint pose for specified chain. By default uses ``_endpoint_pose[chain]`` and transforms
        to the TCP of ``self._current_tool[chain]``. If the robot reports the endpoint position with the tool
        transform applied, this should return ``self._endpoint_pose[chain]``

        Called by the loop each timestep to update driver state.

        :param chain: The chain index, always 0 for single arm driver
        :rtype: com.robotraconteur.geometry.Pose
        :return: The pose of the end effector
        """
        # CALL LOCKED!
        if self._current_tool[chain] is None:
            return self._endpoint_pose[chain]
        # Compose the reported endpoint pose with the tool TCP transform
        endpoint_transform = self._geometry_util.pose_to_rox_transform(self._endpoint_pose[chain])
        tool_transform = self._geometry_util.transform_to_rox_transform(self._current_tool[chain].tcp)
        res = endpoint_transform * tool_transform
        return self._geometry_util.rox_transform_to_pose(res)
def _calc_endpoint_poses(self):
"""
Compute the endpoints of all chains. Calls ``_calc_endpoint_pose()`` for each chain.
Called by the loop each timestep to update driver state.
:rtype: com.robotraconteur.geometry.Pose[]
:return: Array of all chain poses. Single element array for single arm drivers
"""
if self._endpoint_pose is None:
return np.zeros((0,), dtype=self._pose_dtype)
n = len(self._endpoint_pose)
o = np.zeros((n,), dtype=self._pose_dtype)
for i in range(n):
o[i] = self._calc_endpoint_pose(i)
return o
def _calc_endpoint_vel(self, chain):
"""
Compute spatial velocity for specified chain. By default uses ``_endpoint_vel[chain]`` and applies TCP
transform of ``self._current_tool[chain]``. If the robot reports the endpoint position with the tool
transform applied, this should return ``self._endpoint_vel[chain]``
Called by the loop each timestep to update driver state.
:param chain: The chain index, always 0 for single arm driver
:rtype: com.robotraconteur.geometry.SpatialVelocity
:return: The spatial velocity (6x1) of the end effector
"""
# CALL LOCKED!
if self._current_tool[chain] is None:
return self._endpoint_vel[chain]
endpoint_vel = self._geometry_util.spatial_velocity_to_array(self._endpoint_vel).flatten()
endpoint_vel_ang = endpoint_vel[0:3]
endpoint_vel_lin = endpoint_vel[3:7]
current_tool_p = self._geometry_util.point_to_xyz(self._current_tool[chain].tcp["translation"])
endpoint_transform = self._geometry_util.pose_to_rox_transform(self._endpoint_pose[chain])
vel = endpoint_vel_lin + np.cross(endpoint_vel_ang, np.matmul(endpoint_transform.R, current_tool_p))
return self._geometry_util.array_to_spatial_acceleration(np.concatenate((endpoint_vel_ang, vel)))
def _calc_endpoint_vels(self):
"""
Compute the spatial velocity of all chains. Calls ``_calc_endpoint_vel()`` for each chain.
Called by the loop each timestep to update driver state.
:rtype: com.robotraconteur.geometry.SpatialVelocity[]
:return: Array of all chain spatial velocities. Single element array for single arm drivers
"""
if self._endpoint_vel is None:
return np.zeros((0,),dtype=self._spatial_velocity_dtype)
n = len(self._endpoint_vel)
o = np.zeros((n,),dtype=self._spatial_velocity_dtype)
for i in range(n):
o[i] = self._calc_endpoint_vel(i)
return o
def _fill_states(self, now):
"""
Fill the ``RobotState``, ``AdvancedRobotState``, and ``RobotStateSensorData`` structures based on
current driver state.
Called by the loop each timestep to fill data to send to clients.
:param now: stopwatch time in seconds
:rtype: Tuple[RobotState,AdvancedRobotState,RobotStateSensorData]
"""
ts = self._datetime_util.TimeSpec3Now()
rob_state = self._robot_state_type()
rob_state.ts = ts
rob_state.seqno = self._state_seqno
rob_state.command_mode = self._command_mode
rob_state.operational_mode = self._operational_mode
rob_state.controller_state = self._controller_state
flags = self._fill_state_flags(now)
rob_state.robot_state_flags = flags
rob_state.joint_position = np.copy(self._joint_position)
rob_state.joint_velocity = np.copy(self._joint_velocity)
rob_state.joint_effort = np.copy(self._joint_effort)
rob_state.joint_position_command = self._position_command if self._position_command is not None \
else np.zeros((0,))
rob_state.joint_velocity_command = self._velocity_command if self._velocity_command is not None \
else np.zeros((0,))
rob_state.kin_chain_tcp = self._calc_endpoint_poses()
rob_state.kin_chain_tcp_vel = self._calc_endpoint_vels()
rob_state.trajectory_running = self._trajectory_valid
a_rob_state = self._advanced_robot_state_type()
a_rob_state.ts = ts
a_rob_state.seqno = rob_state.seqno
a_rob_state.command_mode = rob_state.command_mode
a_rob_state.operational_mode = rob_state.operational_mode
a_rob_state.controller_state = rob_state.controller_state
a_rob_state.robot_state_flags = rob_state.robot_state_flags
a_rob_state.joint_position = rob_state.joint_position
a_rob_state.joint_velocity = rob_state.joint_velocity
a_rob_state.joint_effort = rob_state.joint_effort
a_rob_state.joint_position_command = rob_state.joint_position_command
a_rob_state.joint_velocity_command = rob_state.joint_velocity_command
a_rob_state.kin_chain_tcp = rob_state.kin_chain_tcp
a_rob_state.kin_chain_tcp_vel = rob_state.kin_chain_tcp_vel
a_rob_state.trajectory_running = rob_state.trajectory_running
a_rob_state.joint_position_units = [self._joint_position_units["radian"]]*self._joint_count
a_rob_state.joint_effort_units = [self._joint_effort_units["newton_meter"]]*self._joint_count
a_rob_state.trajectory_running = self._trajectory_valid
a_rob_state.trajectory_time = self._trajectory_current_time
a_rob_state.trajectory_max_time = self._trajectory_max_time
a_rob_state.trajectory_current_waypoint = self._trajectory_waypoint
a_rob_state.config_seqno = self._config_seqno
sensor_data_header = self._sensor_data_util.FillSensorDataHeader(self._robot_info.device_info, self._state_seqno)
sensor_data = self._robot_state_sensor_data_type()
sensor_data.data_header = sensor_data_header
sensor_data.robot_state = a_rob_state
return rob_state, a_rob_state, sensor_data
    def _send_states(self, now, rr_robot_state, rr_advanced_robot_state, rr_state_sensor_data):
        """
        Sends the states to the Robot Raconteur clients using broadcast wires

        Called by the loop each timestep to send data to clients.

        :param now: stopwatch time in seconds
        :param rr_robot_state: populated RobotState instance
        :param rr_advanced_robot_state: populated AdvancedRobotState instance
        :param rr_state_sensor_data: populated RobotStateSensorData instance
        """
        # Wires/pipes are only valid after RRServiceObjectInit has run
        if not self._wires_ready:
            return
        self.robot_state.OutValue = rr_robot_state
        self.advanced_robot_state.OutValue = rr_advanced_robot_state
        # Fire-and-forget packet send; no-op completion handler
        self.robot_state_sensor_data.AsyncSendPacket(rr_state_sensor_data, lambda: None)
        self.device_clock_now.OutValue = self._datetime_util.FillDeviceTime(self._robot_info.device_info, self._state_seqno)
    @abstractmethod
    def _send_disable(self, handler):
        """
        Called to send a disable command to the robot. Only valid if driver has ``software_enable`` capability.
        Implementing class must override if used. ``handler`` must be called to complete the asynchronous request.

        :param handler: completion handler supplied by Robot Raconteur
        """
        pass
    def async_disable(self, handler):
        """
        Called by client to request robot disable. Delegates to ``_send_disable()``.

        :param handler: completion handler supplied by Robot Raconteur
        """
        self._send_disable(handler)
    @abstractmethod
    def _send_enable(self, handler):
        """
        Called to send an enable command to the robot. Only valid if driver has ``software_enable`` capability.
        Implementing class must override if used. ``handler`` must be called to complete the asynchronous request.

        :param handler: completion handler supplied by Robot Raconteur
        """
        pass
    def async_enable(self, handler):
        """
        Called by client to request robot enable. Delegates to ``_send_enable()``.

        :param handler: completion handler supplied by Robot Raconteur
        """
        self._send_enable(handler)
    @abstractmethod
    def _send_reset_errors(self, handler):
        """
        Called to send a reset errors command to the robot. Only valid if driver has ``software_reset_errors``
        capability. Implementing class must override if used. ``handler`` must be called to complete the
        asynchronous request.

        :param handler: completion handler supplied by Robot Raconteur
        """
        pass
    def async_reset_errors(self, handler):
        """
        Called by client to request software reset errors. Delegates to ``_send_reset_errors()``.

        :param handler: completion handler supplied by Robot Raconteur
        """
        self._send_reset_errors(handler)
    def _verify_communication(self, now):
        """
        Verify that the driver is communicating with the robot. Compares the last communication time to
        ``_communication_timeout`` to determine when communication has been lost.

        Called by the loop each timestep to check if robot is still communicating.

        :param now: stopwatch time in seconds
        :return: True if communication is valid, False if communication has been lost
        """
        if (now - self._last_joint_state) > self._communication_timeout \
            or (now - self._last_robot_state) > self._communication_timeout \
            or (now - self._last_endpoint_state) > self._communication_timeout :
            # Communication lost: invalidate command mode and clear cached feedback
            self._communication_failure = True
            self._command_mode = self._robot_command_mode["invalid_state"]
            if self._base_set_operational_mode:
                self._operational_mode = self._robot_operational_mode["undefined"]
                self._controller_state = self._robot_controller_state["undefined"]
            self._joint_position = np.zeros((0,))
            self._joint_velocity = np.zeros((0,))
            self._joint_effort = np.zeros((0,))
            self._endpoint_pose = None
            self._endpoint_vel = None
            return False
        if self._base_set_operational_mode:
            self._operational_mode = self._robot_operational_mode["cobot"]
        self._communication_failure = False
        return True
    def _verify_robot_state(self, now):
        """
        Verify that the robot is ready to operate, or if an error has occurred. Drops to ``halt`` command mode
        if robot is not ready. Drops to ``invalid_state`` command mode if an error has occurred.

        :param now: stopwatch time in seconds
        :return: True if the robot is ready to be commanded, False otherwise
        """
        # Homing mode only requires the robot to be enabled and error-free (not "ready")
        if self._command_mode == self._robot_command_mode["homing"]:
            if self._enabled and not self._error and not self._communication_failure:
                if self._base_set_controller_state:
                    self._controller_state = self._robot_controller_state["motor_off"]
                return True
        if not self._ready or self._error or self._communication_failure:
            # Report the most specific stop cause available
            if self._base_set_controller_state:
                if self._stopped:
                    self._controller_state = self._robot_controller_state["emergency_stop"]
                elif self._error:
                    self._controller_state = self._robot_controller_state["guard_stop"]
                else:
                    self._controller_state = self._robot_controller_state["motor_off"]
            if self._error or self._command_mode != self._robot_command_mode["halt"]:
                self._command_mode = self._robot_command_mode["invalid_state"]
            return False
        if not self._enabled:
            if self._base_set_controller_state:
                self._controller_state = self._robot_controller_state["motor_off"]
            if self._command_mode != self._robot_command_mode["halt"]:
                self._command_mode = self._robot_command_mode["invalid_state"]
            return False
        # Recovered: leave invalid_state once the error has cleared
        if self._command_mode == self._robot_command_mode["invalid_state"] and not self._error:
            self._command_mode = self._robot_command_mode["halt"]
        if self._base_set_controller_state:
            self._controller_state = self._robot_controller_state["motor_on"]
        return True
def _fill_robot_command(self, now):
"""
Fill robot command to send to robot based on current state and commands sent by the client. Returns a
tuple containing three elements: ``success``, ``joint_position_command``, ``joint_velocity_command``.
If success is False, the driver cannot generate a command in its current state. If ``success`` is True,
either ``joint_position_command`` will be non-Null, or ``joint_velocity_command`` will be non-Null.
``joint_velocity_command`` is only valid if the driver has the ``velocity_command`` driver capability.
``joint_position_command`` is in radians (or meters), while ``joint_velocity_command`` is in radians/s
(or meters/s)
This function is called by the loop every timestep, and the return is passed to ``_send_joint_command()``.
It is not typically called by the implementing class.
:param now: stopwatch time in seconds
:rtype: Tuple[bool,np.array,np.array]
:return: ``success``, ``joint_position_command``, ``joint_velocity_command``
"""
self._wire_position_command_sent = False
self._wire_velocity_command_sent = False
self._trajectory_valid = False
self._trajectory_current_time = 0.
self._trajectory_max_time = 0.
self._trajectory_waypoint = 0
if self._command_mode != self._robot_command_mode["trajectory"]:
if self._active_trajectory is not None:
self._active_trajectory._invalid_mode()
self._active_trajectory = None
if len(self._queued_trajectories) > 0:
for t in self._queued_trajectories:
t._cancelled_in_queue()
self._queued_trajectories.clear()
if self._command_mode != self._robot_command_mode["jog"]:
if self._jog_trajectory_generator is not None:
self._jog_trajectory_generator = None
if self._jog_completion_handler is not None:
h = self._jog_completion_handler
self._jog_completion_handler = None
self._node.PostToThreadPool(lambda: h(None))
if self._command_mode != self._robot_command_mode["velocity_command"]:
# self._velocity_command = None
pass
if self._command_mode == self._robot_command_mode["jog"]:
if self._jog_trajectory_generator is not None:
jog_time = now - self._jog_start_time
if jog_time > self._jog_trajectory_generator.t_final:
if self._jog_completion_handler is not None:
h = self._jog_completion_handler
self._jog_completion_handler = None
self._node.PostToThreadPool(lambda: h(None))
self._jog_trajectory_generator = None
return False, None, None
res, jog_command = self._jog_trajectory_generator.get_command(jog_time)
if not res:
return False, None, None
joint_pos_cmd = jog_command.command_position
return True, joint_pos_cmd, None
else:
if self._jog_completion_handler is not None:
h = self._jog_completion_handler
self._jog_completion_handler = None
self._node.PostToThreadPool(lambda: h(None))
return True, None, None
elif self._command_mode == self._robot_command_mode["position_command"]:
res, pos_cmd, ts, ep = self.position_command.TryGetInValue()
if not res:
return True, None, None
if self._wire_position_command_last_ep != ep:
self._wire_position_command_last_ep = ep
self._wire_position_command_last_seqno = 0
if pos_cmd is None \
or pos_cmd.seqno < self._wire_position_command_last_seqno \
or abs(pos_cmd.state_seqno - self._state_seqno) > 10 \
or len(pos_cmd.command) != self._joint_count \
or len(pos_cmd.units) != 0 and len(pos_cmd.units) != self._joint_count:
return True, None, None
pos_cmd_j = None
if len(pos_cmd.units) == 0:
pos_cmd_j = pos_cmd.command
else:
pos_cmd_j = np.zeros((self._joint_count,))
for i in range(self._joint_count):
if pos_cmd.units[i] == self._joint_position_units["implicit"] \
or pos_cmd.units[i] == self._joint_position_units["radian"]:
pos_cmd_j[i] = pos_cmd.command[i]
elif pos_cmd.units[i] == self._joint_position_units["degree"]:
pos_cmd_j[i] = np.deg2rad(pos_cmd.command[i])
elif pos_cmd.units[i] == self._joint_position_units["ticks_rot"]:
pos_cmd_j[i] = pos_cmd.command[i]*(2.*np.pi)/(pow(2.,20.))
elif pos_cmd.units[i] == self._joint_position_units["nanoticks_rot"]:
pos_cmd_j[i] = pos_cmd.command[i]*(2.*np.pi)/(pow(2.,20.)*1.e9)
else:
return True, None, None
self._wire_position_command_last_seqno = pos_cmd.seqno
self._wire_position_command_sent = True
return True, pos_cmd_j, None
elif self._command_mode == self._robot_command_mode["velocity_command"]:
res, vel_cmd, ts, ep = self.velocity_command.TryGetInValue()
if not res:
return True, None, None
if self._wire_velocity_command_last_ep != ep:
self._wire_velocity_command_last_ep = ep
self._wire_velocity_command_last_seqno = 0
if vel_cmd is None \
or vel_cmd.seqno < self._wire_velocity_command_last_seqno \
or abs(vel_cmd.stat_seqno - self._state_seqno) > 10 \
or len(vel_cmd.command) != self._joint_count \
or len(vel_cmd.units) != 0 and len(vel_cmd.units) != self._joint_count:
return True, None, None
vel_cmd_j = None
if len(vel_cmd.units) == 0:
vel_cmd_j = vel_cmd.command
else:
vel_cmd_j = np.zeros((self._joint_count,))
for i in range(self._joint_count):
if vel_cmd.units[i] == self._joint_position_units["implicit"] \
or vel_cmd.units[i] == self._joint_position_units["radian_second"]:
vel_cmd_j[i] = vel_cmd.command[i]
elif vel_cmd.units[i] == self._joint_position_units["degree_second"]:
vel_cmd_j[i] = np.deg2rad(vel_cmd.command[i])
elif vel_cmd.units[i] == self._joint_position_units["ticks_rot_second"]:
vel_cmd_j[i] = vel_cmd.command[i]*(2.*np.pi)/(pow(2.,20.))
elif vel_cmd.units[i] == self._joint_position_units["nanoticks_rot_second"]:
vel_cmd_j[i] = vel_cmd.command[i]*(2.*np.pi)/(pow(2.,20.)*1.e9)
else:
return True, None, None
self._wire_position_command_last_seqno = vel_cmd.seqno
if self._speed_ratio != 1.0:
vel_cmd_j *= self._speed_ratio
self._wire_position_command_sent = True
return True, None, vel_cmd_j
elif self._command_mode == self._robot_command_mode["trajectory"]:
if self._active_trajectory is not None:
send_traj_cmd = False
interp_res, traj_pos, traj_vel, traj_t, traj_max_time, traj_waypoint = self._active_trajectory._get_setpoint(now, self._joint_position)
if interp_res == TrajectoryTaskRes.ready:
self._trajectory_valid = True
send_traj_cmd = False
elif interp_res == TrajectoryTaskRes.first_valid_setpoint or \
interp_res == TrajectoryTaskRes.valid_setpoint:
self._trajectory_valid = True
send_traj_cmd = True
elif interp_res == TrajectoryTaskRes.trajectory_complete:
self._trajectory_valid = True
send_traj_comd = True
self._active_trajectory = None
if len(self._queued_trajectories) > 0:
self._active_trajectory = self._queued_trajectories.pop(0)
else:
self._trajectory_valid = False
send_traj_cmd = False
self._active_trajectory = None
for w in self._queued_trajectories:
w._cancelled_in_queue()
self._queued_trajectories.clear()
if self._trajectory_valid:
self._trajectory_current_time = traj_t
self._trajectory_max_time = traj_max_time
self._trajectory_waypoint = traj_waypoint
if send_traj_cmd:
joint_pos_cmd = traj_pos
else:
joint_pos_cmd = None
else:
joint_pos_cmd = None
return True, joint_pos_cmd, None
else:
return True, None, None
@abstractmethod
def _send_robot_command(self, now, joint_pos_cmd, joint_vel_cmd):
"""
Called each timestep to send robot command. Must be implemented by subclass.
Both ``joint_pos_cmd`` and ``joint_vel_cmd`` may be None if there is no valid command available.
If ``joint_pos_cmd`` is non-Null, a joint position command must be sent. All drivers must support
position command. ``joint_vel_cmd`` is only used for ``velocity_command`` mode, and is only supported
if the driver has ``velocity_command`` capability.
:param now: stopwatch time in seconds
:param joint_pos_cmd: Joint position command in radians (or meters)
:param joint_vel_cmd: Joint velocity command in radians/s (or meters/s)
"""
pass
@property
def command_mode(self):
    """
    Get or set the current command mode. Command mode must always be set to ``halt`` (0) before changing
    to another mode. If there is an error, the mode will change to ``error`` (-1), and must be set to
    ``halt`` to clear the error. If the error cannot be cleared, it may be possible to call the robot
    "reset_errors()" function, if the driver has the ``software_reset_errors`` capability.

    ``jog`` mode (1) requires the robot be in manual operational mode, if the robot supports reading the
    operational mode and is not a cobot. The ``jog_command`` capability is required.

    ``trajectory`` mode (2) can run in auto or manual operational mode and requires the
    ``trajectory_command`` capability.

    ``position_command`` mode (3) can run in auto or manual operational mode and requires the
    ``position_command`` capability.

    ``velocity_command`` mode (4) can run in auto or manual operational mode and requires the
    ``velocity_command`` capability.

    ``homing_command`` mode (5) requires the ``homing_command`` capability. The implementation is device
    specific
    """
    with self._lock:
        return self._command_mode

@command_mode.setter
def command_mode(self, value):
    with self._lock:
        modes = self._robot_command_mode
        healthy = self._enabled and not self._error and not self._communication_failure
        # Special recovery transitions allowed while in invalid_state
        if self._command_mode == modes["invalid_state"]:
            if value == modes["homing"]:
                if not self._enabled or self._communication_failure:
                    raise RR.InvalidOperationException("Cannot set homing command mode in current state")
                self._command_mode = modes["homing"]
                return
            if value == modes["halt"] and healthy:
                self._command_mode = value
                return
        # General preconditions for any mode change
        if self._communication_failure:
            raise RR.InvalidOperationException("Cannot set robot command mode in current state")
        if not self._ready and value != modes["halt"]:
            raise RR.InvalidOperationException("Cannot set robot command mode in current state")
        if self._command_mode != modes["halt"] and value != modes["halt"]:
            raise RR.InvalidOperationException("Must switch to \"halt\" before selecting new mode")
        # Per-mode capability checks
        if value == modes["jog"]:
            if not self._has_jog_command:
                raise RR.InvalidOperationException("Robot does not support jog command mode")
            self._jog_trajectory_generator = None
            self._command_mode = modes["jog"]
        elif value == modes["halt"] or value == modes["trajectory"]:
            self._command_mode = value
        elif value == modes["homing"]:
            if not self._uses_homing:
                raise RR.InvalidOperationException("Robot does not support homing command mode")
            self._command_mode = value
        elif value == modes["position_command"]:
            if not self._has_position_command:
                raise RR.InvalidOperationException("Robot does not support position command mode")
            self._command_mode = value
        elif value == modes["velocity_command"]:
            if not self._has_velocity_command:
                raise RR.InvalidOperationException("Robot does not support velocity command mode")
            self._command_mode = value
        else:
            raise RR.InvalidOperationException("Invalid command mode specified")
def async_jog_freespace(self, joint_position, max_velocity, wait, handler):
    """
    Called by client to jog the robot to a specified joint position with specified maximum joint velocity.
    If wait is True, the function will not return to the client until the move is complete. Otherwise will
    return immediately.

    This function is typically used to jog the robot to a specific position.

    Robot must be in ``jog`` command mode to call this function.

    This is an asynchronous function, and handler must be called to return result to the client.

    :param joint_position: The desired joint position in radians
    :type joint_position: np.ndarray
    :param max_velocity: The maximum joint velocity in radians/s
    :type max_velocity: np.ndarray
    :param wait: Wait for completion or return immediately
    :type wait: bool
    :param handler: Handler to call when function is complete
    :type handler: Callable[[],Exception]
    """
    with self._lock:
        # Validate mode, robot state, and the incoming arrays before accepting the jog
        if self._command_mode != self._robot_command_mode["jog"]:
            raise RR.InvalidOperationException("Robot not in jog mode")
        if not self._ready:
            raise RR.InvalidOperationException("Robot not ready")
        if len(joint_position) != self._joint_count:
            raise RR.InvalidArgumentException(f"joint_position array must have {self._joint_count} elements")
        if len(max_velocity) != self._joint_count:
            raise RR.InvalidArgumentException(f"max_velocity array must have {self._joint_count} elements")
        # Reject targets too far from the current position (limit held in
        # self._jog_joint_limit; the message assumes the default of 15 degrees)
        if np.any(np.abs(self._joint_position - joint_position) > self._jog_joint_limit):
            raise RR.InvalidArgumentException("Position command must be within 15 degrees from current")
        if np.any(max_velocity <= 0):
            raise RR.InvalidArgumentException("max_velocity must be greater than zero")

        # A new jog command interrupts any previously pending jog completion
        if self._jog_completion_handler is not None:
            h = self._jog_completion_handler
            self._jog_completion_handler = None
            self._node.PostToThreadPool(
                lambda: h(RR.OperationAbortedException("Operation interrupted by new jog command")))

        now = self._stopwatch_ellapsed_s()
        if self._jog_trajectory_generator is None:
            # No jog in progress: build joint limits based on the operational mode
            if self._operational_mode == self._robot_operational_mode["manual_reduced_speed"]:
                limits_a_max = np.array([j.joint_limits.reduced_acceleration for j in self._robot_info.joint_info], dtype=np.float64)
                limits_v_max = np.array([j.joint_limits.reduced_velocity for j in self._robot_info.joint_info], dtype=np.float64)
            elif self._operational_mode == self._robot_operational_mode["manual_full_speed"] or \
                    self._operational_mode == self._robot_operational_mode["cobot"]:
                limits_a_max = np.array([j.joint_limits.acceleration for j in self._robot_info.joint_info], dtype=np.float64)
                limits_v_max = np.array([j.joint_limits.velocity for j in self._robot_info.joint_info], dtype=np.float64)
            else:
                raise RR.InvalidOperationException("Invalid operation mode for jog")
            limits_x_min = np.array([j.joint_limits.lower for j in self._robot_info.joint_info], dtype=np.float64)
            limits_x_max = np.array([j.joint_limits.upper for j in self._robot_info.joint_info], dtype=np.float64)

            limits = JointTrajectoryLimits(
                x_min = limits_x_min,
                x_max = limits_x_max,
                v_max = limits_v_max,
                a_max = limits_a_max,
                j_max = None
            )

            # The requested max velocity must not exceed the mode's joint limits
            for i in range(self._joint_count):
                if np.abs(max_velocity[i]) > limits.v_max[i]:
                    raise RR.InvalidArgumentException(
                        f"max_velocity[{i}] is greater than joint limits ({limits.v_max[i]})")

            self._jog_trajectory_generator = TrapezoidalJointTrajectoryGenerator(self._joint_count, limits)

            # Seed the generator from the last commanded state, falling back to
            # the measured joint position / zero velocity
            new_req = JointTrajectoryPositionRequest(
                current_position = (self._position_command if self._position_command is not None else np.copy(self._joint_position)),
                current_velocity = (self._velocity_command if self._velocity_command is not None else np.zeros((self._joint_count,))),
                desired_position = joint_position,
                desired_velocity = np.zeros((self._joint_count,)),
                max_velocity = max_velocity,
                speed_ratio = self._speed_ratio
            )

            self._jog_trajectory_generator.update_desired_position(new_req)
            self._jog_start_time = now
        else:
            # Jog already in progress: retarget the generator from its current
            # commanded position/velocity
            jog_trajectory_t = now - self._jog_start_time
            res, cmd = self._jog_trajectory_generator.get_command(jog_trajectory_t)
            if not res:
                raise RR.InvalidOperationException("Cannot update jog command")

            new_req = JointTrajectoryPositionRequest(
                current_position = cmd.command_position,
                current_velocity = cmd.command_velocity,
                desired_position = joint_position,
                desired_velocity = np.zeros((self._joint_count,)),
                max_velocity = max_velocity,
                speed_ratio = self._speed_ratio
            )

            self._jog_trajectory_generator.update_desired_position(new_req)
            self._jog_start_time = now

        if not wait:
            # NOTE(review): `_jog_completion_source` is not read anywhere in this
            # class; this looks like a leftover from a C# TaskCompletionSource
            # pattern and `self._jog_completion_handler = None` may have been
            # intended -- verify before changing.
            self._jog_completion_source = None
            self._node.PostToThreadPool(lambda: handler(None))
        else:
            # Completion reported from the driver loop when the jog finishes
            self._jog_completion_handler = handler
def async_jog_joint(self, joint_velocity, timeout, wait, handler):
    """
    Called by client to jog the robot at a specified joint velocity for a specified time. If wait
    is True, the function will not return to the client until the move is complete. Otherwise will return
    immediately.

    This function is typically called repeatedly by the client (with wait=False) to drive the robot in
    response to user input such as a panel button or joystick.

    Robot must be in ``jog`` command mode to call this function.

    This is an asynchronous function, and handler must be called to return result to the client.

    :param joint_velocity: The desired joint velocity in radians/s
    :type joint_velocity: np.ndarray
    :param timeout: The timeout to run at the specified velocity
    :type timeout: float
    :param wait: Wait for completion or return immediately
    :type wait: bool
    :param handler: Handler to call when function is complete
    :type handler: Callable[[],Exception]
    """
    with self._lock:
        if self._command_mode != self._robot_command_mode["jog"]:
            raise RR.InvalidOperationException("Robot not in jog mode")
        if not self._ready:
            # NOTE(review): sibling async_jog_freespace raises
            # InvalidOperationException here; OperationAbortedException is kept
            # unchanged for backward compatibility -- confirm which is intended
            raise RR.OperationAbortedException("Robot not ready")
        if len(joint_velocity) != self._joint_count:
            raise RR.InvalidArgumentException(f"joint_velocity array must have {self._joint_count} elements")
        if timeout <= 0:
            raise RR.InvalidArgumentException("Invalid jog timeout specified")
        for i in range(self._joint_count):
            # FIX: was abs(joint_velocity[i] > limit), which applied abs() to
            # the boolean comparison result and let negative velocities exceed
            # the limit without being rejected
            if abs(joint_velocity[i]) > self._robot_info.joint_info[i].joint_limits.reduced_velocity:
                raise RR.InvalidArgumentException("Joint velocity exceeds joint limits")

        # A new jog command interrupts any previously pending jog completion
        if self._jog_completion_handler is not None:
            h = self._jog_completion_handler
            self._jog_completion_handler = None
            self._node.PostToThreadPool(
                lambda: h(RR.OperationAbortedException("Operation interrupted by new jog command")))

        now = self._stopwatch_ellapsed_s()
        if self._jog_trajectory_generator is None:
            # No jog in progress: build joint limits based on the operational mode
            if self._operational_mode == self._robot_operational_mode["manual_reduced_speed"]:
                limits_a_max = np.array([j.joint_limits.reduced_acceleration for j in self._robot_info.joint_info], dtype=np.float64)
                limits_v_max = np.array([j.joint_limits.reduced_velocity for j in self._robot_info.joint_info], dtype=np.float64)
            elif self._operational_mode == self._robot_operational_mode["manual_full_speed"] or \
                    self._operational_mode == self._robot_operational_mode["cobot"]:
                limits_a_max = np.array([j.joint_limits.acceleration for j in self._robot_info.joint_info], dtype=np.float64)
                limits_v_max = np.array([j.joint_limits.velocity for j in self._robot_info.joint_info], dtype=np.float64)
            else:
                raise RR.InvalidOperationException("Invalid operation mode for jog")
            limits_x_min = np.array([j.joint_limits.lower for j in self._robot_info.joint_info], dtype=np.float64)
            limits_x_max = np.array([j.joint_limits.upper for j in self._robot_info.joint_info], dtype=np.float64)

            limits = JointTrajectoryLimits(
                x_min = limits_x_min,
                x_max = limits_x_max,
                v_max = limits_v_max,
                a_max = limits_a_max,
                j_max = None
            )

            self._jog_trajectory_generator = TrapezoidalJointTrajectoryGenerator(self._joint_count, limits)

            # Seed the generator from the last commanded state, falling back to
            # the measured joint position / zero velocity
            new_req = JointTrajectoryVelocityRequest(
                current_position = (self._position_command if self._position_command is not None else np.copy(self._joint_position)),
                current_velocity = (self._velocity_command if self._velocity_command is not None else np.zeros((self._joint_count,))),
                desired_velocity = joint_velocity,
                speed_ratio = self._speed_ratio,
                timeout = timeout
            )

            self._jog_trajectory_generator.update_desired_velocity(new_req)
            self._jog_start_time = now
        else:
            # Jog already in progress: retarget the generator from its current
            # commanded position/velocity
            jog_trajectory_t = now - self._jog_start_time
            res, cmd = self._jog_trajectory_generator.get_command(jog_trajectory_t)
            if not res:
                raise RR.InvalidOperationException("Cannot update jog command")

            new_req = JointTrajectoryVelocityRequest(
                current_position = cmd.command_position,
                current_velocity = cmd.command_velocity,
                desired_velocity = joint_velocity,
                timeout = timeout,
                speed_ratio = self._speed_ratio
            )

            self._jog_trajectory_generator.update_desired_velocity(new_req)
            self._jog_start_time = now

        if not wait:
            # FIX: was `self._jog_completion_source = None`, a write to an
            # attribute that is never read (leftover from the C# implementation)
            self._jog_completion_handler = None
            self._node.PostToThreadPool(lambda: handler(None))
        else:
            # Completion reported from the driver loop when the jog finishes
            self._jog_completion_handler = handler
@property
def robot_info(self):
    """
    Returns the current ``RobotInfo`` structure. The ``RobotInfo`` structure will be updated with tool
    and payload information as it changes.

    :return: The populated RobotInfo structure
    :rtype: RobotInfo
    """
    with self._lock:
        for idx, chain in enumerate(self._robot_info.chains):
            chain.current_tool = self._current_tool[idx]
            chain.current_payload = self._current_payload[idx]
            if chain.extended is None:
                chain.extended = dict()
            payload_pose = self._current_payload_pose[idx]
            if payload_pose is not None:
                chain.extended["current_payload_pose"] = \
                    RR.VarValue(payload_pose, "com.robotraconteur.geometry.Pose")
            else:
                chain.extended["current_payload_pose"] = None
        return self._robot_info
def execute_trajectory(self, trajectory):
    """
    Called by the client to execute a trajectory. Must be in ``trajectory`` command mode.

    This function returns a generator. The client must call ``Next()`` repeatedly on the generator
    until the trajectory is complete.

    The first waypoint on the trajectory must be reasonably close to the current robot position.

    :param trajectory: The trajectory to execute
    :type trajectory: JointTrajectory
    :return: The trajectory generator, that must have ``Next()`` called repeatedly to execute trajectory
    :rtype: TrajectoryStatus{generator}
    """
    owner_ep = RR.ServerEndpoint.GetCurrentEndpoint()

    # Snapshot the state needed for validation, then interpolate outside the lock
    with self._lock:
        speed_ratio = self._speed_ratio
        current_joint_pos = np.copy(self._joint_position)
    interp = JointTrajectoryInterpolator(self._robot_info)
    interp.load_trajectory(trajectory, speed_ratio)

    # The trajectory's starting waypoint must be close to the current position
    res, joint_pos1, _ = interp.interpolate(0)
    assert res
    if np.any(np.abs(current_joint_pos - joint_pos1) > self._trajectory_error_tol):
        raise RR.InvalidArgumentException("Starting waypoint too far from current joint positions")

    with self._lock:
        if self._command_mode != self._robot_command_mode["trajectory"]:
            # FIX: corrected "execut trajectory" typo in the error message
            raise RR.InvalidOperationException("Robot must be in trajectory mode to execute trajectory")
        if self._active_trajectory is None:
            # Nothing running; this trajectory becomes active immediately
            traj_task = TrajectoryTask(self, interp, False, owner_ep)
            self._active_trajectory = traj_task
        else:
            # Queue behind the currently executing trajectory
            traj_task = TrajectoryTask(self, interp, True, owner_ep)
            self._queued_trajectories.append(traj_task)
        return traj_task
def _cancel_trajectory(self, trajectory):
"""
Cancel a trajectory that is in the queue. Called from the trajectory generator if ``Close()`` is called.
"""
with self._lock:
if trajectory is self._active_trajectory:
self._active_trajectory = None
for t in self._queued_trajectories:
t._cancelled_in_queue()
self._queued_trajectories.clear()
else:
for i in range(len(self._queued_trajectories)):
if trajectory is self._queued_trajectories[i]:
t_index = i
break
if t_index >= 0:
for i in range(len(self._queued_trajectories)-1, t_index, -1):
self._queued_trajectories[i]._cancelled_in_queue()
self._queued_trajectories.pop(i)
self._queued_trajectories.pop(t_index)
def _abort_trajectory(self, trajectory):
"""
Aborts trajectory and all trajectories by dropping to ``halt`` command made. Called by trajectory
generater if ``Abort()`` is called.
"""
self._command_mode = self._robot_command_mode["halt"]
@property
def speed_ratio(self):
    """
    Get or set the speed ratio. Can be used to reduce or increase speed of trajectory and other
    operations.

    :param value: New speed ratio. Must be between 0.1 and 10
    :type value: float
    """
    return self._speed_ratio

@speed_ratio.setter
def speed_ratio(self, value):
    ratio_min, ratio_max = 0.1, 10
    if value < ratio_min or value > ratio_max:
        raise RR.InvalidArgumentException("Invalid speed_ratio")
    self._speed_ratio = value
@property
def operational_mode(self):
    """Return the current operational mode of the controller, if available"""
    # FIX: previously returned self._operation_mode, an attribute that does not
    # exist (the class stores self._operational_mode; see _verify_communication
    # and the jog methods), so reading this property raised AttributeError
    return self._operational_mode
@property
def controller_state(self):
    """Return the current state of the vendor robot controller, if available"""
    # FIX: added @property for consistency with the sibling operational_mode /
    # device_info accessors; the Robot Raconteur robot service definition
    # exposes controller_state as a read-only property, so the service layer
    # reads it as an attribute, not a method call
    return self._controller_state
@property
def current_errors(self):
    """Returns currently reported errors, if available (base driver reports none)"""
    # FIX: added @property for consistency with operational_mode; the Robot
    # Raconteur robot service definition exposes current_errors as a read-only
    # property read by attribute access
    return []
def jog_cartesian(self, velocity, timeout, wait):
    """
    Jog the robot at a specified cartesian end-effector velocity for a
    specified time. Typically called repeatedly by the client (with wait=False)
    to drive the robot in response to user input such as a panel button or
    joystick. Robot must be in ``jog`` command mode.

    The abstract driver does not implement cartesian jogging; subclasses that
    support it must override this method.

    :param velocity: The desired end effector spatial velocity in meters/s,radians/s
    :type velocity: SpatialVelocity
    :param timeout: The timeout to run at the specified velocity
    :type timeout: float
    :param wait: Wait for completion or return immediately
    :type wait: bool
    """
    raise RR.NotImplementedException("Not implemented")
def async_home(self, handler):
    """
    Home the robot; behavior is device specific. Robot must be in ``homing``
    command mode. Subclasses with the ``homing_command`` capability must
    override this method.

    :param handler: Handler to call when function is complete
    :type handler: Callable[[],Exception]
    """
    raise RR.NotImplementedException()
def async_getf_signal(self, signal_name, handler):
    """Read a driver-specific signal value. Optionally overridden by subclass."""
    raise RR.NotImplementedException()
def async_setf_signal(self, signal_name, value, handler):
    """Write a driver-specific signal value. Optionally overridden by subclass."""
    raise RR.NotImplementedException()
def tool_attached(self, chain, tool):
    """
    Called by client to notify the driver that a tool has been attached. TCP is used to compute endpoint
    position and velocity. Implementing class may also update the vendor robot controller if necessary.

    :param chain: The kinematic chain the tool has been attached to
    :type chain: int
    :param tool: The ToolInfo structure of the tool, specified by the client
    :type tool: ToolInfo
    """
    if tool is None:
        raise RR.NullValueException("Tool cannot be null")
    # Only kinematic chain 0 is supported. FIX: was `chain > 0 or ...`, which
    # let negative chain values through and silently indexed from the end of
    # the tool list
    if chain != 0 or not (chain < len(self._current_tool)):
        raise RR.InvalidArgumentException(f"Invalid kinematic chain {chain} for tool")
    with self._lock:
        if self._current_tool[chain] is not None:
            raise RR.InvalidArgumentException(f"Tool already attached to kinematic chain {chain}")
        self._current_tool[chain] = tool
        try:
            device_name = tool.device_info.device.name
        except Exception:
            # Tool info may not carry full device identification; fall back to
            # an empty name rather than failing the attach
            traceback.print_exc()
            device_name = ""
        self.tool_changed.fire(chain, device_name)
        self._config_seqno += 1
def tool_detached(self, chain, tool_name):
    """
    Called by client to notify the driver that a tool has been detached. Payloads must be detached
    before the tool can be detached.

    :param chain: The kinematic chain the tool is attached to
    :type chain: int
    :param tool_name: The name of the tool that was detached
    :type tool_name: str
    """
    # Only kinematic chain 0 is supported; also reject negative chain values
    if chain != 0 or not (chain < len(self._current_tool)):
        raise RR.InvalidArgumentException(f"Invalid kinematic chain {chain} for tool")
    with self._lock:
        if self._current_tool[chain] is None:
            raise RR.InvalidArgumentException(f"Tool not attached to kinematic chain {chain}")
        if self._current_payload[chain] is not None:
            raise RR.InvalidArgumentException(f"Cannot remove tool while payload attached")
        if len(tool_name) > 0:
            try:
                # FIX: previously read self._current_tool.device_info (missing
                # the [chain] index), which always raised inside the try and
                # made the name check below fail for every valid tool_name
                device_name = self._current_tool[chain].device_info.device.name
            except Exception:
                traceback.print_exc()
                device_name = ""
            if device_name != tool_name:
                raise RR.InvalidArgumentException(f"Invalid tool name to detach from kinematic chain {chain}")
        self._current_tool[chain] = None
        self.tool_changed.fire(chain, "")
        self._config_seqno += 1
def payload_attached(self, chain, payload, pose):
    """
    Called by client to notify the driver that a payload has been attached to the tool. A tool must be
    attached to attach a payload. The pose between the payload and tool is also specified.

    Implementing class may also update the vendor robot controller if necessary.

    :param chain: The kinematic chain the payload has been attached to
    :type chain: int
    :param payload: The PayloadInfo structure of the payload, specified by the client
    :type payload: PayloadInfo
    :param pose: The pose of the payload relative to the tool TCP
    :type pose: com.robotraconteur.geometry.Pose
    """
    if payload is None:
        raise RR.NullValueException("Payload cannot be null")
    # Only kinematic chain 0 is supported. FIX: was `chain > 0 or ...`, which
    # let negative chain values through and silently indexed from the end of
    # the payload list
    if chain != 0 or not (chain < len(self._current_payload)):
        raise RR.InvalidArgumentException(f"Invalid kinematic chain {chain} for payload")
    with self._lock:
        if self._current_tool[chain] is None:
            raise RR.InvalidArgumentException(f"No tool attached to kinematic chain {chain}, cannot attach payload")
        if self._current_payload[chain] is not None:
            raise RR.InvalidArgumentException(f"Payload already attached to kinematic chain {chain}")
        self._current_payload[chain] = payload
        self._current_payload_pose[chain] = pose
        try:
            device_name = payload.device_info.device.name
        except Exception:
            # Payload info may not carry full device identification; fall back
            # to an empty name rather than failing the attach
            traceback.print_exc()
            device_name = ""
        self.payload_changed.fire(chain, device_name)
        self._config_seqno += 1
def payload_detached(self, chain, payload_name):
    """
    Called by client to notify the driver that a payload has been detached.

    :param chain: The kinematic chain the payload was detached from
    :type chain: int
    :param payload_name: The name of the payload that was detached
    :type payload_name: str
    """
    # Only kinematic chain 0 is supported; also reject negative chain values
    if chain != 0 or not (chain < len(self._current_payload)):
        raise RR.InvalidArgumentException(f"Invalid kinematic chain {chain} for payload")
    with self._lock:
        if self._current_payload[chain] is None:
            raise RR.InvalidArgumentException(f"Payload not attached to kinematic chain {chain}")
        if len(payload_name) != 0:
            try:
                device_name = self._current_payload[chain].device_info.device.name
            except Exception:
                traceback.print_exc()
                device_name = ""
            if device_name != payload_name:
                raise RR.InvalidArgumentException(f"Invalid payload name to detach from kinematic chain {chain}")
        self._current_payload[chain] = None
        # FIX: also clear the stored payload pose so robot_info stops
        # reporting a pose for a payload that is no longer attached
        self._current_payload_pose[chain] = None
        self.payload_changed.fire(chain, "")
        self._config_seqno += 1
def getf_param(self, param_name):
    """Read a driver-specific parameter. Optionally overridden by subclass."""
    raise RR.InvalidArgumentException("Invalid parameter")
def setf_param(self, param_name, value):
    """Write a driver-specific parameter. Optionally overridden by subclass."""
    raise RR.InvalidArgumentException("Invalid parameter")
@property
def device_info(self):
    """The DeviceInfo structure contained within the driver's RobotInfo"""
    return self._robot_info.device_info
@property
def isoch_info(self):
    """Return a freshly populated IsochInfo structure describing the driver update loop"""
    info = self._isoch_info_type()
    info.update_rate = 1.0 / self._update_period
    info.max_downsample = 1000
    info.isoch_epoch = self._stopwatch_epoch
    return info
@property
def isoch_downsample(self):
    """
    Get or set the current client isoch_downsample level. By default, the wires and pipes will transmit
    every timestep. The ``isoch_downsample`` property allows the client to request every ``n`` samples
    be dropped. For instance, if ``isoch_downsample`` is set to 2, the driver will skip two timesteps,
    and only transmit on every third timestep. Check ``isoch_info`` to determine the native loop update
    rate in Hz.

    :param value: The downsample level
    :type value: int
    """
    with self._lock:
        client_ep = RR.ServerEndpoint.GetCurrentEndpoint()
        return self._broadcast_downsampler.GetClientDownsample(client_ep)

@isoch_downsample.setter
def isoch_downsample(self, value):
    with self._lock:
        client_ep = RR.ServerEndpoint.GetCurrentEndpoint()
        self._broadcast_downsampler.SetClientDownsample(client_ep, value)
class TrajectoryTaskRes(Enum):
    """
    Result codes returned by trajectory setpoint interpolation
    (``TrajectoryTask._get_setpoint``) and consumed by
    ``_fill_robot_command`` to drive the trajectory state machine.
    """
    unknown = 0               # no result available
    ready = 1                 # trajectory loaded and waiting to start; no setpoint sent
    first_valid_setpoint = 2  # first setpoint of the trajectory; send command
    valid_setpoint = 3        # intermediate setpoint; send command
    trajectory_complete = 4   # final setpoint; trajectory finished
    invalid_state = 5         # driver left trajectory mode
    joint_tol_error = 6       # tracking tolerance exceeded; trajectory dropped
    failed = 7                # execution failed
class TrajectoryTask:
def __init__(self, parent, path, queued, owner_ep):
    """
    Track execution of a single client trajectory request.

    :param parent: the owning robot driver instance
    :param path: the trajectory interpolator holding the loaded trajectory
    :param queued: True if this trajectory is queued behind an active one
    :param owner_ep: the Robot Raconteur endpoint that submitted the trajectory
    """
    self._parent = parent
    self._path = path
    self._queued = queued
    self._owner_ep = owner_ep
    self._next_called = False        # AsyncNext() has been called at least once
    self._started = False            # trajectory has begun executing
    self._start_time = 0
    self._aborted = False            # client called Abort()
    self._cancelled = False          # client called Close()
    self._joint_tol_error = False    # tracking error exceeded tolerance
    self._finished = False           # trajectory ran to completion
    self._next_wait_handler = []     # callbacks pending on next status change
    self._queue_wait_handler = []    # callbacks pending on queue state change
    self._success_sent = False       # completion already reported to the client
    self._node = parent._node
    # Resolve the RR structure/constant types used when reporting status
    self._trajectory_status_type = \
        self._node.GetStructureType("com.robotraconteur.robotics.trajectory.TrajectoryStatus")
    self._action_consts = self._node.GetConstants("com.robotraconteur.action")
    self._action_status_code = self._action_consts["ActionStatusCode"]
    self._traj_t = 0.0               # current trajectory time reported to the client
    self._traj_waypoint = 0          # current waypoint reported to the client
    self._lock = threading.Lock()
def _call_next_wait_handler(self, err):
with self._lock:
for c in self._next_wait_handler:
self._node.PostToThreadPool(lambda c=c, err=err: c(err))
self._next_wait_handler.clear()
def _call_queue_wait_handler(self,err):
with self._lock:
for c in self._queue_wait_handler:
self._node.PostToThreadPool(lambda c=c, err=err: c(err))
self._next_wait_handler.clear()
def Abort(self):
self._aborted = True
self._parent._abort_trajectory(self)
self._call_next_wait_handler(RR.OperationAbortedException("Trajectory execution aborted"))
def Close(self):
self._cancelled = True
self._parent._cancel_trajectory(self)
self._call_next_wait_handler(RR.OperationAbortedException("Trajectory execution cancelled"))
def AsyncNext(self,handler):
if self._success_sent:
raise RR.StopIterationException("")
with self._lock:
first_call = not self._next_called
self._next_called = True
if first_call and self._queued:
# Report back that we are queued immediately
ret = self._trajectory_status_type()
ret.action_status = self._action_status_code["queued"]
ret.trajectory_time = 0
ret.current_waypoint = 0
ret.seqno = self._parent._state_seqno
handler(ret, None)
return
complete_called = [False]
def complete(err):
with self._lock:
if complete_called[0]:
return
complete_called[0] = True
if err:
handler(None, err)
if not self._started:
# Still queued...
ret = self._trajectory_status_type()
ret.action_status = self._action_status_code["queued"]
ret.trajectory_time = 0
ret.current_waypoint = 0
ret.seqno = self._parent._state_seqno
handler(ret, None)
return
if self._finished:
self._success_sent = True
ret = self._trajectory_status_type()
ret.action_status = self._action_status_code["complete"]
ret.trajectory_time = self._traj_t
ret.current_waypoint = int(self._traj_waypoint)
ret.seqno = self._parent._state_seqno
handler(ret, None)
return
else:
ret = self._trajectory_status_type()
ret.action_status = self._action_status_code["running"]
ret.trajectory_time = self._traj_t
ret.current_waypoint = int(self._traj_waypoint)
ret.seqno = self._parent._state_seqno
handler(ret,None)
return
if self._queued:
self._next_wait_handler.append(complete)
self._queue_wait_handler.append(complete)
else:
self._next_wait_handler.append(complete)
timer = self._node.CreateTimer(5, lambda _: complete(None), True)
timer.Start()
def _cancelled_in_queue(self):
self._cancelled = True
self._call_next_wait_handler(RR.OperationAbortedException("Trajectory cancelled by controller before start"))
def _invalid_mode(self):
self._aborted = True
self._call_next_wait_handler(RR.OperationAbortedException("Invalid mode for trajectory execution"))
def _get_setpoint(self, now, current_joint_pos):
if self._cancelled or self._aborted:
return TrajectoryTaskRes.failed, None, None, 0.0, 0.0, 0
first_call = False
t = 0.0
if self._next_called:
if not self._started:
self._start_time = now
self._started = True
first_call = True
t = now - self._start_time
res, joint_pos1, current_waypoint1 = self._path.interpolate(t)
if not res:
self._call_next_wait_handler(Exception("Trajectory execution failed"))
return TrajectoryTaskRes.failed, None, None, 0.0, 0.0, 0
if np.any(np.abs(current_joint_pos - joint_pos1) > self._parent._trajectory_error_tol):
self._call_next_wait_handler(RR.OperationFailedException("Trajectory tolerance failure"))
return TrajectoryTaskRes.ready, None, None, 0.0, 0.0, 0
if not self._next_called:
return TrajectoryTaskRes.ready, None, None, 0.0, self._path.max_time, 0
if t > self._path.max_time:
self._traj_t = t
self._traj_waypoint = current_waypoint1
self._finished = True
self._call_next_wait_handler(None)
return TrajectoryTaskRes.trajectory_complete, joint_pos1, None, t, self._path.max_time, current_waypoint1
if first_call:
if self._queued:
self._queued = False
self._call_queue_wait_handler(None)
return TrajectoryTaskRes.first_valid_setpoint, joint_pos1, None, t, self._path.max_time, current_waypoint1
else:
return TrajectoryTaskRes.valid_setpoint, joint_pos1, None, t, self._path.max_time, current_waypoint1
#TODO: Add connection test? | /robotraconteur_abstract_robot-0.2.1-py3-none-any.whl/robotraconteur_abstract_robot/abstract_robot.py | 0.876039 | 0.418935 | abstract_robot.py | pypi |
import numpy as np
from scipy.interpolate import CubicSpline
import RobotRaconteur as RR
class JointTrajectoryInterpolator:
    """Validates a joint trajectory and interpolates it with cubic splines.

    Waypoint times are divided by the requested speed ratio before validation
    and spline fitting, so a ratio greater than one speeds the trajectory up.
    """

    def __init__(self, info):
        """:param info: RobotInfo structure providing joint names and limits"""
        self._joint_names = [j.joint_identifier.name for j in info.joint_info]
        self._joint_min = np.array([j.joint_limits.lower for j in info.joint_info])
        self._joint_max = np.array([j.joint_limits.upper for j in info.joint_info])
        self._joint_vel_max = np.array([j.joint_limits.velocity for j in info.joint_info])
        self._joint_splines = None
        self._max_t = 0
        self._joint_start = None
        self._joint_end = None
        self._waypoint_times = None

    def load_trajectory(self, traj, speed_ratio):
        """Validate *traj* against the robot limits and fit the splines.

        :param traj: JointTrajectory structure with five or more waypoints
        :param speed_ratio: time scaling factor applied to waypoint times
        :raises RR.InvalidArgumentException: if the trajectory is invalid
        """
        if len(traj.joint_names) > 0:
            if traj.joint_names != self._joint_names:
                raise RR.InvalidArgumentException("Joint names in trajectory must match robot joint names")
        if traj.waypoints is None:
            raise RR.InvalidArgumentException("Waypoint list must not be null")
        if len(traj.waypoints) < 5:
            raise RR.InvalidArgumentException("Waypoint list must contain five or more waypoints")
        if traj.waypoints[0].time_from_start != 0:
            raise RR.InvalidArgumentException("Trajectory time_from_start must equal zero for first waypoint")
        n_waypoints = len(traj.waypoints)
        n_joints = len(self._joint_names)
        traj_t = np.zeros((n_waypoints,))
        traj_j = np.zeros((n_waypoints, n_joints))
        last_t = 0
        for i in range(n_waypoints):
            w = traj.waypoints[i]
            if len(w.joint_position) != n_joints:
                raise RR.InvalidArgumentException(f"Waypoint {i} invalid joint array length")
            if len(w.joint_velocity) != n_joints and len(w.joint_velocity) != 0:
                raise RR.InvalidArgumentException(f"Waypoint {i} invalid joint velocity array length")
            if len(w.position_tolerance) != n_joints and len(w.position_tolerance) != 0:
                raise RR.InvalidArgumentException(f"Waypoint {i} invalid tolerance array length")
            if len(w.velocity_tolerance) != n_joints and len(w.velocity_tolerance) != 0:
                raise RR.InvalidArgumentException(f"Waypoint {i} invalid tolerance array length")
            if i > 0:
                if w.time_from_start/speed_ratio <= last_t:
                    raise RR.InvalidArgumentException(f"Waypoint {i} time_from_start must be increasing")
                if w.time_from_start/speed_ratio - last_t > 0.1:
                    # BUGFIX: the f prefix was missing, so the literal text
                    # "{i}" appeared in the error message.
                    raise RR.InvalidArgumentException(f"Waypoint {i} more than 100 ms from previous waypoint")
            if np.any(w.joint_position > self._joint_max) or np.any(w.joint_position < self._joint_min):
                raise RR.InvalidArgumentException(f"Waypoint {i} exceeds joint limits")
            if len(w.joint_velocity) > 0:
                if np.any(np.abs(w.joint_velocity*speed_ratio) > self._joint_vel_max):
                    raise RR.InvalidArgumentException(f"Waypoint {i} exceeds joint velocity limits")
            if i > 0:
                # The average velocity between consecutive waypoints must also
                # respect the joint velocity limits.
                last_w = traj.waypoints[i-1]
                dt = w.time_from_start/speed_ratio - last_w.time_from_start/speed_ratio
                dj = np.abs(w.joint_position - last_w.joint_position)
                if np.any(dj/dt > self._joint_vel_max):
                    raise RR.InvalidArgumentException(f"Waypoint {i} exceeds joint velocity limits")
            traj_t[i] = w.time_from_start/speed_ratio
            traj_j[i, :] = w.joint_position
            last_t = w.time_from_start / speed_ratio
        self._joint_splines = CubicSpline(traj_t, traj_j)
        self._max_t = last_t
        self._joint_start = traj.waypoints[0].joint_position
        self._joint_end = traj.waypoints[-1].joint_position
        self._waypoint_times = traj_t

    @property
    def max_time(self):
        """Duration of the loaded trajectory in seconds (speed-scaled)."""
        return self._max_t

    def interpolate(self, time):
        """Return ``(True, joint positions, current waypoint index)`` at *time*.

        Times before the start clamp to the first waypoint; times at or past
        the end clamp to the last waypoint.
        """
        if time <= 0:
            return True, self._joint_start, 0
        if time >= self._max_t:
            return True, self._joint_end, len(self._waypoint_times) - 1
        joint_pos = self._joint_splines(time)
        # Index of the most recent waypoint at or before *time*.
        passed = np.where(self._waypoint_times <= time)[0]
        current_waypoint = passed[-1] if len(passed) > 0 else 0
        return True, joint_pos, current_waypoint
"""处理程序运行的各种参数."""
import os
class SQLOptions(object):
    """Manage the runtime options of the SQL client."""

    # (Name, Value, Comments, Hidden) for every built-in option.
    _DEFAULT_OPTIONS = [
        ("WHENEVER_SQLERROR", "CONTINUE", '', False),
        ("PAGE", "OFF", '', False),
        ("ECHO", "ON", '', False),
        ("TIMING", "OFF", '', False),
        ("TIME", "OFF", '', False),
        ("CSV_HEADER", "OFF", 'ON|OFF', False),
        ("CSV_DELIMITER", ",", '', False),
        ("CSV_QUOTECHAR", "", '', False),
        ("FEEDBACK", "ON", 'ON|OFF', False),
        ("TERMOUT", "ON", 'ON|OFF', False),
        ("ARRAYSIZE", 10000, '', False),
        ("SQLREWRITE", "ON", 'ON|OFF', False),
        ("LOB_LENGTH", 20, '', False),
        ("FLOAT_FORMAT", "%.7g", '', False),
        ("DECIMAL_FORMAT", "", '', False),
        ("DATE_FORMAT", "%Y-%m-%d", '', False),
        ("DATETIME_FORMAT", "%Y-%m-%d %H:%M:%S.%f", '', False),
        ("TIME_FORMAT", "%H:%M:%S.%f", '', False),
        ("DATETIME-TZ_FORMAT", "%Y-%m-%d %H:%M:%S %z", '', False),
        ("OUTPUT_SORT_ARRAY", "ON", 'Print Array output with sort order.', False),
        ("OUTPUT_PREFIX", "", 'Output Prefix', False),
        ("OUTPUT_ERROR_PREFIX", "", 'Error Output Prefix', False),
        ("OUTPUT_FORMAT", "LEGACY", 'TAB|CSV|LEGACY', False),
        ("CONN_RETRY_TIMES", "1", 'Connect retry times.', False),
        ("DEBUG", "OFF", 'ON|OFF', True),
        ("CONNURL", "", 'Connection URL', True),
        ("CONNPROP", "", 'Connection Props', True),
        ("SILENT", "OFF", '', True),
        ("SQL_EXECUTE", "PREPARE", 'DIRECT|PREPARE', False),
        ("JOBMANAGER", "OFF", 'ON|OFF', False),
        ("JOBMANAGER_METAURL", "", '', False),
        ("SCRIPT_TIMEOUT", -1, '', False),
        ("SQL_TIMEOUT", -1, '', False),
        ("PRIORITY", "", '', False),
        ("SCRIPT_ENCODING", "UTF-8", '', False),
        ("RESULT_ENCODING", "UTF-8", '', False),
    ]

    def __init__(self):
        # Build a per-instance list of option dicts so set() never mutates
        # the class-level defaults.
        self.m_SQL_OptionList = [
            {"Name": name, "Value": value, "Comments": comments, "Hidden": hidden}
            for name, value, comments, hidden in self._DEFAULT_OPTIONS
        ]

    @staticmethod
    def _resolve_value(p_Value, p_DefaultValue):
        """Expand ${ENV(NAME)} references and apply the default value.

        Returns the stripped value; a None value or a missing environment
        variable falls back to p_DefaultValue (or "" when no default is given).
        """
        if p_Value is not None:
            m_Value = str(p_Value).strip()
            # Case-insensitive match on the ${ENV(...)} wrapper; the variable
            # name itself keeps its original case.
            if m_Value.upper().startswith("${ENV(") and m_Value.upper().endswith(")}"):
                m_Value = os.environ.get(m_Value[6:-2])
        else:
            m_Value = None
        if m_Value is None:
            return "" if p_DefaultValue is None else p_DefaultValue
        return m_Value

    def get(self, p_ParameterName):
        """Return the value of the named option, or None if it does not exist."""
        for item in self.m_SQL_OptionList:
            if item["Name"] == p_ParameterName:
                return item["Value"]
        return None

    def getOptionList(self):
        """Return the full list of runtime options."""
        return self.m_SQL_OptionList

    def set(self, p_ParameterName, p_ParameterValue, p_ParameterDefaultValue=None, p_Hidden=False):
        """Set a runtime option; a None value loads the default value.

        Known options are updated in place; names starting with "@" are
        stored as user session variables.  Returns False for unknown names so
        the caller can report a command-not-found error.
        """
        for item in self.m_SQL_OptionList:
            if item["Name"] == p_ParameterName:
                # BUGFIX: a None value previously crashed with AttributeError
                # on .upper() before the default value could be applied.
                item["Value"] = self._resolve_value(p_ParameterValue, p_ParameterDefaultValue)
                return True
        m_ParameterName = p_ParameterName.strip()
        if m_ParameterName.startswith("@"):
            self.m_SQL_OptionList.append({"Name": m_ParameterName,
                                          "Value": self._resolve_value(p_ParameterValue, p_ParameterDefaultValue),
                                          "Hidden": p_Hidden,
                                          "Comments": 'User session variable'})
            return True
        return False
from collections import namedtuple
__all__ = []
def export(defn):
    """Decorator that adds *defn* to the module's public API (``__all__``)."""
    name = defn.__name__
    globals()[name] = defn
    __all__.append(name)
    return defn
# Registry entry describing one registered special command.
SpecialCommand = namedtuple(
    "SpecialCommand",
    "handler command description hidden case_sensitive",
)

# Maps the (lower-cased unless case-sensitive) command name to its entry.
COMMANDS = {}
@export
class CommandNotFound(Exception):
    """Raised when the requested special command is not registered."""
    pass
@export
def parse_special_command(sql):
    """Split *sql* into ``(command, verbose, argument)``.

    A "+" in the command word marks verbose mode and is stripped from the
    returned command name.
    """
    head, _, tail = sql.partition(" ")
    is_verbose = "+" in head
    head = head.strip().replace("+", "")
    return head, is_verbose, tail.strip()
@export
def special_command(
    command,
    description,
    hidden=False,  # hide the command from the help listing when True
    case_sensitive=False,  # require an exact-case match when True
):
    """Decorator factory that registers the decorated handler as a special command."""
    def wrapper(wrapped):
        register_special_command(wrapped, command, description, hidden, case_sensitive)
        return wrapped
    return wrapper
@export
def register_special_command(
    handler,
    command,
    description,
    hidden=False,
    case_sensitive=False
):
    """Add *handler* to COMMANDS, keyed lower-cased unless case-sensitive."""
    key = command if case_sensitive else command.lower()
    COMMANDS[key] = SpecialCommand(handler, command, description, hidden, case_sensitive)
@export
def execute(cls, sql, timeout: int):
    """Execute a special command and return the results. If the special command
    is not supported a CommandNotFound will be raised.
    """
    command, verbose, arg = parse_special_command(sql)
    # Reject early if neither the exact nor the lower-cased name is registered.
    if (command not in COMMANDS) and (command.lower() not in COMMANDS):
        raise CommandNotFound
    try:
        special_cmd = COMMANDS[command]
    except KeyError:
        # Fall back to the case-insensitive entry; reject it if that entry
        # demands an exact-case match.
        special_cmd = COMMANDS[command.lower()]
        if special_cmd.case_sensitive:
            raise CommandNotFound("Command not found: %s" % command)
    return special_cmd.handler(cls, arg=arg, timeout=timeout)
@special_command("help", "Show this help.")
def show_help(cls, arg, timeout: int):
    """List every non-hidden special command with its description."""
    if cls and arg and timeout:
        pass
    headers = ["Command", "Description"]
    rows = []
    for _, cmd in sorted(COMMANDS.items()):
        if cmd.hidden:
            continue
        # One row per description line so multi-line help stays aligned.
        rows.extend((cmd.command, line) for line in cmd.description.split('\n'))
    return [{
        "title": None,
        "rows": rows,
        "headers": headers,
        "columnTypes": None,
        "status": None
    }, ]
@special_command("quit", "Quit.")
def quit_sqlcli(cls, arg, timeout: int):
    """Terminate the interactive session by raising EOFError."""
    if cls and arg and timeout:
        pass
    raise EOFError
"""处理程序运行的各种参数."""
import os
class SQLOptions(object):
    """Manage the runtime options of the SQL client."""

    # (Name, Value, Comments, Hidden) for every built-in option.
    _DEFAULT_OPTIONS = [
        ("WHENEVER_SQLERROR", "CONTINUE", '', False),
        ("PAGE", "OFF", '', False),
        ("ECHO", "ON", '', False),
        ("TIMING", "OFF", '', False),
        ("TIME", "OFF", '', False),
        ("CSV_HEADER", "OFF", 'ON|OFF', False),
        ("CSV_DELIMITER", ",", '', False),
        ("CSV_QUOTECHAR", "", '', False),
        ("FEEDBACK", "ON", 'ON|OFF', False),
        ("TERMOUT", "ON", 'ON|OFF', False),
        ("ARRAYSIZE", 10000, '', False),
        ("SQLREWRITE", "ON", 'ON|OFF', False),
        ("LOB_LENGTH", 20, '', False),
        ("FLOAT_FORMAT", "%.7g", '', False),
        ("DECIMAL_FORMAT", "", '', False),
        ("DATE_FORMAT", "%Y-%m-%d", '', False),
        ("DATETIME_FORMAT", "%Y-%m-%d %H:%M:%S.%f", '', False),
        ("TIME_FORMAT", "%H:%M:%S.%f", '', False),
        ("DATETIME-TZ_FORMAT", "%Y-%m-%d %H:%M:%S %z", '', False),
        ("OUTPUT_SORT_ARRAY", "ON", 'Print Array output with sort order.', False),
        ("OUTPUT_PREFIX", "", 'Output Prefix', False),
        ("OUTPUT_ERROR_PREFIX", "", 'Error Output Prefix', False),
        ("OUTPUT_FORMAT", "LEGACY", 'TAB|CSV|LEGACY', False),
        ("CONN_RETRY_TIMES", "1", 'Connect retry times.', False),
        ("DEBUG", "OFF", 'ON|OFF', True),
        ("CONNURL", "", 'Connection URL', True),
        ("CONNPROP", "", 'Connection Props', True),
        ("SILENT", "OFF", '', True),
        ("SQL_EXECUTE", "PREPARE", 'DIRECT|PREPARE', False),
        ("JOBMANAGER", "OFF", 'ON|OFF', False),
        ("JOBMANAGER_METAURL", "", '', False),
        ("SCRIPT_TIMEOUT", -1, '', False),
        ("SQL_TIMEOUT", -1, '', False),
        ("PRIORITY", "", '', False),
        ("SCRIPT_ENCODING", "UTF-8", '', False),
        ("RESULT_ENCODING", "UTF-8", '', False),
    ]

    def __init__(self):
        # Build a per-instance list of option dicts so set() never mutates
        # the class-level defaults.
        self.m_SQL_OptionList = [
            {"Name": name, "Value": value, "Comments": comments, "Hidden": hidden}
            for name, value, comments, hidden in self._DEFAULT_OPTIONS
        ]

    @staticmethod
    def _resolve_value(p_Value, p_DefaultValue):
        """Expand ${ENV(NAME)} references and apply the default value.

        Returns the stripped value; a None value or a missing environment
        variable falls back to p_DefaultValue (or "" when no default is given).
        """
        if p_Value is not None:
            m_Value = str(p_Value).strip()
            # Case-insensitive match on the ${ENV(...)} wrapper; the variable
            # name itself keeps its original case.
            if m_Value.upper().startswith("${ENV(") and m_Value.upper().endswith(")}"):
                m_Value = os.environ.get(m_Value[6:-2])
        else:
            m_Value = None
        if m_Value is None:
            return "" if p_DefaultValue is None else p_DefaultValue
        return m_Value

    def get(self, p_ParameterName):
        """Return the value of the named option, or None if it does not exist."""
        for item in self.m_SQL_OptionList:
            if item["Name"] == p_ParameterName:
                return item["Value"]
        return None

    def getOptionList(self):
        """Return the full list of runtime options."""
        return self.m_SQL_OptionList

    def set(self, p_ParameterName, p_ParameterValue, p_ParameterDefaultValue=None, p_Hidden=False):
        """Set a runtime option; a None value loads the default value.

        Known options are updated in place; names starting with "@" are
        stored as user session variables.  Returns False for unknown names so
        the caller can report a command-not-found error.
        """
        for item in self.m_SQL_OptionList:
            if item["Name"] == p_ParameterName:
                # BUGFIX: a None value previously crashed with AttributeError
                # on .upper() before the default value could be applied.
                item["Value"] = self._resolve_value(p_ParameterValue, p_ParameterDefaultValue)
                return True
        m_ParameterName = p_ParameterName.strip()
        if m_ParameterName.startswith("@"):
            self.m_SQL_OptionList.append({"Name": m_ParameterName,
                                          "Value": self._resolve_value(p_ParameterValue, p_ParameterDefaultValue),
                                          "Hidden": p_Hidden,
                                          "Comments": 'User session variable'})
            return True
        return False
from collections import namedtuple
__all__ = []
def export(defn):
    """Decorator that adds *defn* to the module's public API (``__all__``)."""
    name = defn.__name__
    globals()[name] = defn
    __all__.append(name)
    return defn
# Registry entry describing one registered special command.
SpecialCommand = namedtuple(
    "SpecialCommand",
    "handler command description hidden case_sensitive",
)

# Maps the (lower-cased unless case-sensitive) command name to its entry.
COMMANDS = {}
@export
class CommandNotFound(Exception):
    """Raised when the requested special command is not registered."""
    pass
@export
def parse_special_command(sql):
    """Split *sql* into ``(command, verbose, argument)``.

    A "+" in the command word marks verbose mode and is stripped from the
    returned command name.
    """
    head, _, tail = sql.partition(" ")
    is_verbose = "+" in head
    head = head.strip().replace("+", "")
    return head, is_verbose, tail.strip()
@export
def special_command(
    command,
    description,
    hidden=False,  # hide the command from the help listing when True
    case_sensitive=False,  # require an exact-case match when True
):
    """Decorator factory that registers the decorated handler as a special command."""
    def wrapper(wrapped):
        register_special_command(wrapped, command, description, hidden, case_sensitive)
        return wrapped
    return wrapper
@export
def register_special_command(
    handler,
    command,
    description,
    hidden=False,
    case_sensitive=False
):
    """Add *handler* to COMMANDS, keyed lower-cased unless case-sensitive."""
    key = command if case_sensitive else command.lower()
    COMMANDS[key] = SpecialCommand(handler, command, description, hidden, case_sensitive)
@export
def execute(cls, sql, timeout: int):
    """Execute a special command and return the results. If the special command
    is not supported a CommandNotFound will be raised.
    """
    command, verbose, arg = parse_special_command(sql)
    # Reject early if neither the exact nor the lower-cased name is registered.
    if (command not in COMMANDS) and (command.lower() not in COMMANDS):
        raise CommandNotFound
    try:
        special_cmd = COMMANDS[command]
    except KeyError:
        # Fall back to the case-insensitive entry; reject it if that entry
        # demands an exact-case match.
        special_cmd = COMMANDS[command.lower()]
        if special_cmd.case_sensitive:
            raise CommandNotFound("Command not found: %s" % command)
    return special_cmd.handler(cls, arg=arg, timeout=timeout)
@special_command("help", "Show this help.")
def show_help(cls, arg, timeout: int):
    """List every non-hidden special command with its description."""
    if cls and arg and timeout:
        pass
    headers = ["Command", "Description"]
    rows = []
    for _, cmd in sorted(COMMANDS.items()):
        if cmd.hidden:
            continue
        # One row per description line so multi-line help stays aligned.
        rows.extend((cmd.command, line) for line in cmd.description.split('\n'))
    return [{
        "title": None,
        "rows": rows,
        "headers": headers,
        "columnTypes": None,
        "status": None
    }, ]
@special_command("quit", "Quit.")
def quit_sqlcli(cls, arg, timeout: int):
    """Terminate the interactive session by raising EOFError."""
    if cls and arg and timeout:
        pass
    raise EOFError
# Robots Exclusion Standard Parser for Python
The `robotspy` Python module implements a parser for `robots.txt` files. The recommended class to use is
`robots.RobotsParser`.
A thin facade `robots.RobotFileParser` can also be used as
a substitute for [`urllib.robotparser.RobotFileParser`](https://docs.python.org/3/library/urllib.robotparser.html),
available in the Python standard library. The class `robots.RobotFileParser` exposes an API that is
mostly compatible with `urllib.robotparser.RobotFileParser`.
The main reasons for this rewrite are the following:
1. It was initially intended to experiment with parsing `robots.txt` files for a link checker project
(not implemented yet).
1. It is attempting to follow the latest internet draft
[Robots Exclusion Protocol](https://tools.ietf.org/html/draft-koster-rep-00).
1. It does not try to be compliant with commonly accepted directives that are not in the current
[specs](https://tools.ietf.org/html/draft-koster-rep-00) such as `request-rate` and `crawl-delay`,
but it currently supports `sitemaps`.
1. It satisfies the same tests as the [Google Robots.txt Parser](https://github.com/google/robotstxt),
except for some custom behaviors specific to Google Robots.
To use the `robots` command line tool (CLI) in a Docker container, read the following section **Docker Image**.
To install `robotspy` globally as a tool on your system with `pipx` skip to the **Global Installation** section.
If you are interested in using `robotspy` in a local Python environment or as a library, skip to section **Module Installation**.
## Docker Image
The Robotspy CLI, `robots`, is available as a [Docker](https://www.docker.com/) automated built image at https://hub.docker.com/r/andreburgaud/robotspy.
If you already have [Docker](https://docs.docker.com/get-docker/) installed on your machine, first pull the image from Docker Hub:
```
$ docker pull andreburgaud/robotspy
```
Then, you can exercise the tool against the following remote Python `robots.txt` test file located at http://www.pythontest.net/elsewhere/robots.txt:
```
# Used by NetworkTestCase in Lib/test/test_robotparser.py
User-agent: Nutch
Disallow: /
Allow: /brian/
User-agent: *
Disallow: /webstats/
```
The following examples demonstrate how to use the `robots` command line with the Docker container:
```
$ # Example 1: User agent "Johnny" is allowed to access path "/"
$ docker run --rm andreburgaud/robotspy http://www.pythontest.net/elsewhere/robots.txt Johnny /
user-agent 'Johnny' with path '/': ALLOWED
```
```
$ # Example 2: User agent "Nutch" is not allowed to access path "/brian"
$ docker run --rm andreburgaud/robotspy http://www.pythontest.net/elsewhere/robots.txt Nutch /brian
user-agent 'Nutch' with path '/brian': DISALLOWED
```
```
$ # Example 3: User agent "Johnny" is not allowed to access path "/webstats/"
$ docker run --rm andreburgaud/robotspy http://www.pythontest.net/elsewhere/robots.txt Johnny /webstats/
user-agent 'Johnny' with path '/webstats/': DISALLOWED
```
The arguments are the following:
1. Location of the robots.txt file (`http://www.pythontest.net/elsewhere/robots.txt`)
1. User agent name (`Johnny`)
1. Path or URL (`/`)
Without any argument, `robots` displays the help:
```
docker run --rm andreburgaud/robotspy
usage: robots <robotstxt> <useragent> <path>
Shows whether the given user agent and path combination are allowed or disallowed by the given robots.txt file.
positional arguments:
robotstxt robots.txt file path or URL
useragent User agent name
path Path or URI
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
```
To use the CLI `robots` as a global tool, continue to the following section. If you want to use `robotspy` as a Python module, skip to **Module Installation**.
## Global Installation with pipx
If you only want to use the command line tool `robots`, you may want to use [pipx](https://pipxproject.github.io/pipx/installation/) to install it as a global tool on your system.
To install `robotspy` using `pipx` execute the following command:
```bash
$ pipx install robotspy
```
When `robotspy` is installed globally on your system, you can invoke it from any folder locations. For example, you can execute:
```bash
$ robots --version
robots 0.6.0
```
You can see more detailed usages in section **Usage**.
## Module Installation
**Note**: Python 3.8.x or 3.9.x required
You preferably want to install the `robotspy` package after creating a Python virtual environment,
in a newly created directory, as follows:
```
$ mkdir project && cd project
$ python -m venv .venv
$ . .venv/bin/activate
(.venv) $ python -m pip install --upgrade pip
(.venv) $ python -m pip install --upgrade setuptools
(.venv) $ python -m pip install robotspy
(.venv) $ python -m robots --help
...
```
On Windows:
```
C:/> mkdir project && cd project
C:/> python -m venv .venv
C:/> .venv\scripts\activate
(.venv) c:\> python -m pip install --upgrade pip
(.venv) c:\> python -m pip install --upgrade setuptools
(.venv) c:\> python -m pip install robotspy
(.venv) c:\> python -m robots --help
...
```
## Usage
The `robotspy` package can be imported as a module and also exposes an executable, `robots`, invocable with
`python -m`. If installed globally with `pipx`, the command `robots` can be invoked from any folders. The usage examples in the following section use the command `robots`, but you can also substitute it with `python -m robots` in a virtual environment.
### Execute the Tool
After installing `robotspy`, you can validate the installation by running the following command:
```
$ robots --help
usage: robots <robotstxt> <useragent> <path>
Shows whether the given user agent and path combination are allowed or disallowed by the given robots.txt file.
positional arguments:
robotstxt robots.txt file path or URL
useragent User agent name
path Path or URI
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
```
### Examples
The content of http://www.pythontest.net/elsewhere/robots.txt is the following:
```
# Used by NetworkTestCase in Lib/test/test_robotparser.py
User-agent: Nutch
Disallow: /
Allow: /brian/
User-agent: *
Disallow: /webstats/
```
To check if the user agent `Nutch` can fetch the path `/brian/` you can execute:
```
$ robots http://www.pythontest.net/elsewhere/robots.txt Nutch /brian/
user-agent 'Nutch' with path '/brian/': ALLOWED
```
Or, you can also pass the full URL, http://www.pythontest.net/brian/:
```
$ robots http://www.pythontest.net/elsewhere/robots.txt Nutch http://www.pythontest.net/brian/
user-agent 'Nutch' with url 'http://www.pythontest.net/brian/': ALLOWED
```
Can user agent `Nutch` fetch the path `/brian`?
```
$ robots http://www.pythontest.net/elsewhere/robots.txt Nutch /brian
user-agent 'Nutch' with path '/brian': DISALLOWED
```
Or, `/`?
```
$ robots http://www.pythontest.net/elsewhere/robots.txt Nutch /
user-agent 'Nutch' with path '/': DISALLOWED
```
How about user agent `Johnny`?
```
$ robots http://www.pythontest.net/elsewhere/robots.txt Johnny /
user-agent 'Johnny' with path '/': ALLOWED
```
### Use the Module in a Project
If you have a virtual environment with the `robotspy` package installed, you can use the `robots` module from the Python shell:
```
(.venv) $ python
>>> import robots
>>> parser = robots.RobotsParser.from_uri('http://www.pythontest.net/elsewhere/robots.txt')
>>> useragent = 'Nutch'
>>> path = '/brian/'
>>> result = parser.can_fetch(useragent, path)
>>> print(f'Can {useragent} fetch {path}? {result}')
Can Nutch fetch /brian/? True
>>>
```
### Bug in the Python standard library
There is a bug in [`urllib.robotparser`](https://docs.python.org/3/library/urllib.robotparser.html)
from the Python standard library that causes the following test to differ from the example above with `robotspy`.
The example with `urllib.robotparser` is the following:
```
$ python
>>> import urllib.robotparser
>>> rp = urllib.robotparser.RobotFileParser()
>>> rp.set_url('http://www.pythontest.net/elsewhere/robots.txt')
>>> rp.read()
>>> rp.can_fetch('Nutch', '/brian/')
False
```
Notice that the result is `False` whereas `robotspy` returns `True`.
Bug [bpo-39187](https://bugs.python.org/issue39187) was open to raise awareness on this issue and PR
https://github.com/python/cpython/pull/17794 was submitted as a possible fix. `robotspy` does not
exhibit this problem.
## Development
The main development dependency is `pytest` for executing the tests. It is automatically
installed if you perform the following steps:
```
$ git clone https://github.com/andreburgaud/robotspy
$ cd robotspy
$ python -m venv .venv --prompt robots
$ . .venv/bin/activate
(robots) $ python -m pip install -r requirements.txt
(robots) $ python -m pip install -e .
(robots) $ make test
(robots) $ deactivate
$
```
On Windows:
```
C:/> git clone https://github.com/andreburgaud/robotspy
C:/> cd robotspy
C:/> python -m venv .venv --prompt robotspy
C:/> .venv\scripts\activate
(robots) c:\> python -m pip install -r requirements.txt
(robots) c:\> python -m pip install -e .
(robots) c:\> make test
(robots) c:\> deactivate
```
## Global Tools
The following tools were used during the development of `robotspy`:
* [Black](https://github.com/psf/black)
* [Mypy](http://mypy-lang.org/)
* [Pylint](https://www.pylint.org/)
* [twine](https://pypi.org/project/twine/)
See the build file, `Makefile` or `make.bat` on Windows, for the commands and parameters.
## Release History
* 0.7.0:
* Fixed bug with the argument path when using the CLI
* Print 'url' when the argument is a URL, 'path' otherwise
* 0.6.0:
* Simplified dependencies by keeping only `pytest` in `requirements.txt`
* 0.5.0:
* Updated all libraries. Tested with Python 3.9.
* 0.4.0:
* Fixed issue with robots text pointed by relative paths
* Integration of [Mypy](http://mypy-lang.org/), [Black](https://github.com/psf/black) and [Pylint](https://www.pylint.org/) as dependencies to ease cross-platform development
* Limited `make.bat` build file for Windows
* Git ignore vscode files, `tmp` directory, multiple virtual env (`.venv*`)
* Fixed case insensitive issues on Windows
* Tests successful on Windows
* Added an ATTRIBUTIONS file and a build task to generate it
* Upgraded `pyparsing` and `certifi`
* 0.3.3:
* Upgraded `tqdm`, and `cryptography` packages
* 0.3.2:
* Upgraded `bleach`, `tqdm`, and `setuptools` packages
* 0.3.1:
* Updated `idna` and `wcwidth` packages
* Added `pipdeptree` package to provide visibility on dependencies
* Fixed `mypy` errors
* Explicitly ignored `pylint` errors related to commonly used names like `f`, `m`, or `T`
* 0.3.0: Updated `bleach` package to address CVE-2020-6802
* 0.2.0: Updated the documentation
* 0.1.0: Initial release
## License
[MIT License](LICENSE.md) | /robotspy-0.7.0.tar.gz/robotspy-0.7.0/README.md | 0.4917 | 0.870212 | README.md | pypi |
import argparse
import pathlib
import sys
import urllib.parse
import robots
def init_cli() -> argparse.ArgumentParser:
    """Build and return the argument parser for the command-line interface."""
    description = (
        "Shows whether the given user agent and URI combination "
        "are allowed or disallowed by the given robots.txt file."
    )
    cli: argparse.ArgumentParser = argparse.ArgumentParser(
        usage="%(prog)s <robotstxt> <useragent> <path>",
        description=description,
    )
    # Report the package name (rather than __main__.py) in usage/version text.
    cli.prog = __package__
    version_string = f"{cli.prog} {robots.__version__}"
    cli.add_argument(
        "-v", "--version", action="version", version=version_string
    )
    cli.add_argument("robotstxt", help="robots.txt file path or URL")
    cli.add_argument("useragent", help="User agent name")
    cli.add_argument("path", help="Path or URL")
    return cli
def is_url(path_uri: str) -> bool:
    """Return True when *path_uri* parses as a URL with a recognized scheme."""
    scheme = urllib.parse.urlsplit(path_uri).scheme
    return scheme in ("http", "https", "ftp", "file")
def normalize_uri(path_uri: str) -> str:
    """Convert a filesystem path to a file:// URI; return URLs unchanged."""
    # A pathlib.Path is never treated as a URL, even if urlsplit could parse it.
    if isinstance(path_uri, pathlib.Path) or not is_url(path_uri):
        return pathlib.Path(path_uri).resolve().as_uri()
    return path_uri
def create_robots(robots_uri: str) -> robots.RobotsParser:
    """Build a RobotsParser for the robots.txt content located at *robots_uri*."""
    return robots.RobotsParser.from_uri(robots_uri)
def main() -> None:
    """Entry point for the package as a Python module (python -m).

    Parses the command line, builds a robots.txt parser for the given file or
    URL, prints whether the user agent may fetch the given path/URL, and then
    echoes any parser errors and warnings to stderr.
    """
    cli = init_cli()
    args = cli.parse_args()
    # The robots.txt argument may be a local path; normalize it to a URI.
    robots_uri = normalize_uri(args.robotstxt)
    robots_parser = create_robots(robots_uri)
    allowed = robots_parser.can_fetch(args.useragent, args.path)
    allowed_str = "ALLOWED" if allowed else "DISALLOWED"
    # Label the argument "url" when a full URL was passed, "path" otherwise.
    url_or_path = "url" if is_url(args.path) else "path"
    print(f"user-agent '{args.useragent}' with {url_or_path} '{args.path}': {allowed_str}")
    # Errors/warnings collected while parsing go to stderr so they do not mix
    # with the ALLOWED/DISALLOWED result on stdout.
    if errors := robots_parser.errors:
        for error in errors:
            print(f"{error[0]} -> {error[1]}", file=sys.stderr)
    if warnings := robots_parser.warnings:
        for warning in warnings:
            print(f"{warning[0]} -> {warning[1]}", file=sys.stderr)
# Allow direct execution (python robots/__main__.py) in addition to python -m.
if __name__ == "__main__":
    main()
import time
from typing import List
from . import parser
def gen_lines(lines: List[str]):
    """Lazily yield each entry of *lines* as a generator."""
    for line in lines:
        yield line
class RobotFileParser(parser.RobotsParser):
    """Thin wrapper on RobotsParser to enable some level of compatibility with
    urllib.robotparser.RobotFileParser. The implementation is incomplete, for
    example, crawl_delay and request_rate are hard-coded to return None. The
    unit tests take into account the implementation."""

    def set_url(self, url):
        """Sets the URL referring to a robots.txt file."""
        self.url = url

    def read(self):
        """Populate the tokens if a URL is assigned to the url attribute.

        If no URL has been set, the problem is recorded in the internal error
        list rather than raised.
        """
        if self.url:
            self.parse_tokens(parser.gen_tokens(self.gen_uri, self.url))
        else:
            self._errors.append(
                (
                    self.url,
                    "RobotFileParser.read requires RobotFileParser.url to be set",
                )
            )

    def parse(self, lines):
        """Method 'parse' compatible with urllib.robotparser.RobotFileParser. Parses the tokens
        given an iterator."""
        self.parse_tokens(parser.gen_tokens(gen_lines, lines))

    def mtime(self):
        """Method 'mtime' compatible with urllib.robotparser.RobotFileParser. Return the timestamp
        initialized when parsing a robots.txt url."""
        return self.timestamp

    def modified(self):
        """Method 'modified' compatible with urllib.robotparser.RobotFileParser. When invoked,
        set the internal timestamp to the current time."""
        self.timestamp = time.time()

    def crawl_delay(self, _: str):
        """The 'crawl-delay' directive is not recognized by the Google robots parser. Ignoring it in
        robotspy. Keep this method for compatibility with urllib.robotparser.

        Implicitly returns None; a warning is recorded instead.
        """
        self._warnings.append(
            ("crawl-delay", parser.Errors.WARNING_CRAWL_DELAY_IGNORED)
        )

    def request_rate(self, _: str):
        """The 'request-rate' directive is not recognized by the Google robots parser. Ignoring it in
        robotspy. Keep this method for compatibility with urllib.robotparser.

        Implicitly returns None; a warning is recorded instead.
        """
        self._warnings.append(
            ("request-rate", parser.Errors.WARNING_REQUEST_RATE_IGNORED)
        )

    def site_maps(self):
        """Method site_maps compatible with urllib.robotparser.RobotFileParser. Return the list of
        sitemaps encountered while parsing a robots.txt content."""
        return self.sitemaps
StatusChecker
=============
.. contents::
:local:
Introduction
------------
StatusChecker is a tool for validating that executed `Robot Framework`_
test cases have expected statuses and log messages. It is mainly useful
for Robot Framework test library developers who want to use Robot
Framework to also test their libraries. StatusChecker 1.3 and newer are
compatible both with Python 2 and Python 3.
StatusChecker project is hosted at GitHub_ and downloads are at
PyPI_.
.. _Robot Framework: http://robotframework.org
.. _GitHub: https://github.com/robotframework/statuschecker
.. _PyPI: https://pypi.python.org/pypi/robotstatuschecker
.. _pip: http://pip-installer.org
Installation instructions
-------------------------
The easiest way to install StatusChecker is by using pip_::
pip install robotstatuschecker
Alternatively you can get the code by cloning the project from
GitHub_ or downloading the source distribution from PyPI_ and
extracting it. After that you can install the tool with::
python setup.py install
Usage
-----
From the command line::
python -m robotstatuschecker infile [outfile]
Programmatically:
.. sourcecode:: python
from robotstatuschecker import process_output
process_output('infile.xml', 'outfile.xml')
If an output file is not given, the input file is edited in place.
Defining expected test status
-----------------------------
By default, all test cases are expected to *PASS* and have no
message. Changing the expected status to *FAIL* is done by having
the word ``FAIL`` (in uppercase) somewhere in the test case
documentation. The expected error message must then follow
the ``FAIL`` marker.
For robotframework version 4 you can also change the expected status
to *SKIP* by adding the word ``SKIP`` in the test case documentation.
Like Fail, the expected skip message must follow the word ``SKIP``.
If a test documentation contains the words ``FAIL`` and ``SKIP``, ``SKIP``
will be ignored and the expected status will be *FAIL*.
If a test is expected to *PASS* with a certain message, the word
``PASS`` must be added to its documentation explicitly and the
expected message given after that.
If a message check should happen in test setup or teardown, that check
must be prefixed with ``SETUP`` or ``TEARDOWN`` word.
The expected message can also be specified as a regular expression by
prefixing it with ``REGEXP:``. The specified regular expression
must match the error message fully. Having spaces between the status,
the message and the possible regular expression prefix is optional.
An alternative to using regular expressions is using glob patterns where
``*`` matches anything (including newline) and ``?`` matches any single
character. This can be accomplished by starting the expected message
with ``GLOB:``.
Finally, it is possible to test that the message starts with something
by prefixing the expected message with ``STARTS:``.
The following examples illustrate different ways to define test
statuses and messages:
.. sourcecode:: robotframework
*** Test Cases ***
Simple failure
[Documentation] FAIL Expected error message
Steps
Check in test setup is done by SETUP marker
[Documentation] LOG SETUP This first log message in test setup
[Setup] Test specific setup
Steps
Exclude documentation before marker
[Documentation] This text is ignored FAIL Expected error message
Steps
Regexp example
[Documentation] FAIL REGEXP: (IOError|OSError): .*
Steps
Glob example
[Documentation] FAIL GLOB: ??Error: *
Steps
Start example
[Documentation] FAIL STARTS: IOError:
Steps
Passing without message
Steps
Passing with message
[Documentation] PASS Expected message
Steps
Defining expected log messages
------------------------------
The expected keyword log messages can also be defined in the test case
documentation using a syntax such as::
LOG x.y:z LEVEL Actual message
The part before the colon specifies the keyword to check. For
example, ``1`` means first keyword, ``1.2`` is the second child
keyword of the first keyword, and so on.
The part after the colon specifies the message. For example, ``1:2``
means the second message of the first keyword and ``1.2:3`` is
the third message of the second child keyword of the first keyword.
The message index is optional and defaults to ``1``.
The message index also supports wildcard ``*``. For example ``1:*``
matches any message of the first keyword.
Message level is specified before the actual message, and it can be
any of the valid log levels in capital letters. If the level is not
given it defaults to ``INFO``. Starting from 1.4 release also
``ERROR`` level is supported. The message level also supports wildcard
``ANY`` which will match all log levels.
Possible leading and trailing whitespace is ignored both in the expected
and in the actual log message.
This syntax can be used multiple times to test multiple messages. It
also works together with specifying the expected error message with
``FAIL``, but in that case ``FAIL`` and the expected error must
be first.
It is also possible to give the message as a regular expression or glob
pattern or to give just the start of the message. This is accomplished
by prefixing the message with ``REGEXP:``, ``GLOB:`` or ``STARTS:``,
respectively, exactly like when `defining expected test status`_.
Finally, to check that a keyword does not have a certain message, it
is possible to use ``NONE`` in the place of the message.
.. sourcecode:: robotframework
*** Test cases ***
Simple example
[Documentation] LOG 1 Hello, world!
Steps
Nested keywords
[Documentation] LOG 2.1 1st child of 2nd kw
Steps
Message index
[Documentation] LOG 2:2 2nd msg of 2nd kw
Steps
Nested and index
[Documentation] LOG 3.1:2 2nd msg of 3rd kw's 1st child
Steps
Log levels
[Documentation] LOG 2 DEBUG Debug-level message
... LOG 1.2:3 WARN Warning
Steps
Multiple messages
[Documentation] LOG 1 First tested message
... LOG 1.2 Second tested message
... LOG 2.2.1 DEBUG Third tested message
Steps
Status and log
[Documentation] FAIL Expected error message
... LOG 1.2 Expected log message
Steps
Regexp message
[Documentation] LOG 1 REGEXP: (Hello|Hi) world!
Steps
Glob message
[Documentation] LOG 1 GLOB: * world!
Steps
Start of the message
[Documentation] LOG 1 STARTS: Hello w
Steps
No message
[Documentation] LOG 1:1 Test that we have only 1 msg
... LOG 1:2 NONE
Steps
Count Messages
[Documentation] LOG 4 COUNT: 2 # Fourth keyword should have excatly 2 messages.
Steps
| /robotstatuschecker-3.0.1.tar.gz/robotstatuschecker-3.0.1/README.rst | 0.844473 | 0.692837 | README.rst | pypi |
Python unittest test suite for Robot Framework
==============================================
This is an experimental package
for wrapping Robot Framework test suites into Python unittest suites
to make it possible to run Robot Framework tests
as `plone.testing`_'s layered test suites:
.. code:: python
import unittest
from plone.testing import layered
from robotsuite import RobotTestSuite
from my_package.testing import ACCEPTANCE_TESTING
def test_suite():
suite = unittest.TestSuite()
suite.addTests([
layered(RobotTestSuite('mysuite.txt'),
layer=ACCEPTANCE_TESTING),
])
return suite
*RobotTestSuite* splits Robot Framework test suites into separate
unittest test cases so that Robot will be run once for every test
case in every test suite parsed from the given Robot Framework
test suite.
Because of that, each Robot will generate a separate test report
for each test.
Each report will have its own folder,
which are created recursively
reflecting the structure of the given test suite.
*RobotTestSuite*'s way of wrapping tests into
unittest's test suite is similar to how doctest-module's
DocTestSuite does its wrappings.
See the documentation of DocTestSuite for
possible common parameters (e.g. for how to pass a test suite from a
different package).
The main motivation behind this package is to make
Robot Framework support existing test fixtures and test isolation
when testing `Plone`_.
Yet, this should help anyone wanting to use Robot Framework with
`zope.testrunner`_ or other Python unittest compatible test runner.
.. _plone.testing: http://pypi.python.org/pypi/plone.testing
.. _zope.testrunner: http://pypi.python.org/pypi/zope.testrunner
.. _Plone: http://pypi.python.org/pypi/Plone
If this works for you, please contribute at:
http://github.com/collective/robotsuite/
.. image:: https://github.com/collective/robotsuite/actions/workflows/build.yml/badge.svg?branch=master
:target: https://github.com/collective/robotsuite/actions
Setting robot variables from environment variables
--------------------------------------------------
Robot Framework supports overriding test variables from command-line, which
is not-available when running tests as robotsuite-wrapped with other test
runners. That's why robotsuite supports settings variables as environment
variables so that every ``ROBOT_``-prefixed environment variable will be
mapped into corresponding test variable without the ``ROBOT_``-prefix.
Declaring tests non-critical by given set of tags
-------------------------------------------------
.. note:: Criticality is no-longer supported in Robot Framework >= 4.0 and has been
replaced with SKIP status. Robotsuite does not take a stance on SKIP status yet.
Robot Framework supports declaring tests with given tags as *non-critical*
to prevent their failing to fail the complete build on CI. This is supported
as keyword argument for *RobotTestSuite* as follows:
.. code:: python
def test_suite():
suite = unittest.TestSuite()
suite.addTests([
layered(RobotTestSuite('mysuite.txt',
noncritical=['non-critical-tag']),
layer=ACCEPTANCE_TESTING),
])
return suite
Setting zope.testrunner-level
-----------------------------
`zope.testrunner`_ supports annotating test suites with levels to avoid
slow test being run unless wanted:
.. code:: python
def test_suite():
suite = unittest.TestSuite()
suite.addTests([
layered(RobotTestSuite('mysuite.txt'),
layer=ACCEPTANCE_TESTING),
])
suite.level = 10
return suite
Retry failing tests
-------------------
You can retry a failed test.
This can be useful for flaky robot browser tests.
Warning: this may not be good for all types of test.
For example any changes that were done in the test until the first failure, may persist.
You can enable retries in two ways:
- Set an environment variable ``ROBOTSUITE_RETRY_COUNT=X``.
- Override this by passing ``retry_count=X`` to a ``RobotTestSuite`` call.
The default is zero: no retries.
The retry count *excludes* the original try.
.. code:: python
def test_suite():
suite = unittest.TestSuite()
suite.addTests([
robotsuite.RobotTestSuite('test_example.robot', retry_count=3),
robotsuite.RobotTestSuite('test_variables.robot'),
robotsuite.RobotTestSuite('test_setups', retry_count=2)
])
return suite
Appending test results to existing test report
----------------------------------------------
When running Robot Framework through robotsuite, its test reports are created
into the current working directory with filenames ``robot_output.xml``,
``robot_log.html`` and ``robot_report.html``. The default behavior is to
override the existing ``robot_output.xml`` (and also the other report files
generated from that).
To merge test results from separate test runs into the same test report, set
environment variable ``ROBOTSUITE_APPEND_OUTPUT_XML=1`` to prevent robotsuite
from overriding the existing test results, but to always append to the existing
``robot_output.xml``.
Filtering test execution errors
-------------------------------
Set environment variable ``ROBOTSUITE_LOGLEVEL=ERROR`` to filter all top level
*Test Execution Errors* below the given log level (e.g. ERROR) from the merged
test report. This is useful when unnecessary warnings are leaking from the
tested code into Robot Framework logs.
Including or skipping all RobotTestSuite-wrapped tests
------------------------------------------------------
Robot Framework is often used with Selenium2Library_ to write acceptance test
using the Selenium-framework. Yet, because those test may be slow to run, one
might want sometimes (e.g. on CI) to run everything except the robotsuite
wrapped tests, and later only the robotsuite wrapped tests.
This can be achieved for sure, with injecting a custom string into the names
of robotsuite-wrapped tests with ``ROBOTSUITE_PREFIX``-environment variable
and then filter the test with that string.
E.g. run everything except the robotsuite wrapped tests with:
.. code:: bash
$ ROBOTSUITE_PREFIX=ROBOTSUITE bin/test --all -t \!ROBOTSUITE
and the other way around with:
.. code:: bash
$ ROBOTSUITE_PREFIX=ROBOTSUITE bin/test --all -t ROBOTSUITE
.. _Selenium2Library: https://pypi.python.org/pypi/robotframework-selenium2library
Re-using test suites from other packages
----------------------------------------
Sometime it could be useful to re-use acceptance test from some upstream
package to test your slightly tailored package (e.g. with a custom theme).
This can be done with by defining the test lookup location with
``package``-keyword argument for ``RobotTestSuite``:
.. code:: python
def test_suite():
suite = unittest.TestSuite()
suite.addTests([
layered(leveled(
robotsuite.RobotTestSuite('robot',
package='Products.CMFPlone.tests'),
), layer=PLONE_APP_MOSAIC_NO_PAC_ROBOT),
])
return suite
| /robotsuite-2.3.1.tar.gz/robotsuite-2.3.1/README.rst | 0.831006 | 0.76432 | README.rst | pypi |
import ast
import astunparse
import inspect
import json
import warnings
from tqdm import TqdmExperimentalWarning
# Filter out the TqdmExperimentalWarning
warnings.filterwarnings("ignore", category=TqdmExperimentalWarning)
def analyze_code(code):
    """Statically analyze Python source and print a JSON summary of its API surface.

    Walks the AST of *code* to collect imported module names, public global
    variable assignments, and public function/class definitions (names starting
    with an underscore are skipped, except ``__init__``). Each imported module
    is then actually imported and introspected with ``inspect`` to record the
    signatures and docstrings of its public functions and class methods. The
    combined result is printed to stdout as indented JSON.

    NOTE: ``__import__`` executes the module's top-level code; modules that
    cannot be imported are reported with a warning and omitted from the output.
    """
    # Parse the code using the ast module
    tree = ast.parse(code)
    # Define a custom AST visitor to collect function definitions, imported modules, and global variable assignments
    # The visitor methods mutate the closure variables defined just below the
    # class body (imported_modules, global_variables, ...).
    class FunctionInfoCollector(ast.NodeVisitor):
        def visit_Import(self, node):
            # Collect imported modules
            for alias in node.names:
                imported_modules.append(alias.name)
            self.generic_visit(node)
        def visit_ImportFrom(self, node):
            # Collect imported modules
            # NOTE(review): node.module is None for relative imports such as
            # `from . import x` — confirm callers never pass such code.
            imported_modules.append(node.module)
            self.generic_visit(node)
        def visit_Assign(self, node):
            # Collect global variable assignments
            if isinstance(node.targets[0], ast.Name):
                variable_name = node.targets[0].id
                # Ignore private variables (names starting with an underscore)
                if not variable_name.startswith('_'):
                    global_variables[variable_name] = astunparse.unparse(node.value).strip()
            self.generic_visit(node)
        def visit_FunctionDef(self, node):
            # Collect defined functions and their docstrings
            function_name = node.name
            # Ignore private functions (names starting with an underscore)
            if not function_name.startswith('_'):
                docstring = ast.get_docstring(node) if ast.get_docstring(node) else ""
                # The first docstring line serves as the short summary.
                summary = docstring.split('\n')[0] if docstring else ""
                defined_functions[function_name] = {
                    "docstring": docstring,
                    "summary": summary
                }
            self.generic_visit(node)
        def visit_ClassDef(self, node):
            # Collect defined classes and their methods
            class_name = node.name
            # Ignore private classes (names starting with an underscore)
            if not class_name.startswith('_'):
                class_methods = {}
                for item in node.body:
                    if isinstance(item, ast.FunctionDef):
                        method_name = item.name
                        # Ignore private methods (names starting with an underscore)
                        if not method_name.startswith('_') or method_name == "__init__":
                            docstring = ast.get_docstring(item) if ast.get_docstring(item) else ""
                            summary = docstring.split('\n')[0] if docstring else ""
                            class_methods[method_name] = {
                                "docstring": docstring,
                                "summary": summary
                            }
                defined_classes[class_name] = class_methods
            self.generic_visit(node)
    # Use the custom AST visitor to collect defined functions, imported modules, and global variable assignments
    imported_modules = []
    global_variables = {}
    defined_functions = {}
    defined_classes = {}
    collector = FunctionInfoCollector()
    collector.visit(tree)
    # Load imported modules and retrieve their function signatures
    module_info = {}
    for module_name in imported_modules:
        try:
            module = __import__(module_name)
            functions = {}
            for name, obj in inspect.getmembers(module, inspect.isfunction):
                # Ignore private functions (names starting with an underscore)
                if not name.startswith('_'):
                    try:
                        # Each signature entry: (param name, annotation name or "", "required" or "").
                        signature = [(param_name, param.annotation.__name__ if hasattr(param.annotation, '__name__') and param.annotation != inspect.Parameter.empty else "", "required" if param.default is inspect.Parameter.empty else "") for param_name, param in inspect.signature(obj).parameters.items()]
                        # create a signature summary by converting into a string
                        signature_summary = name + "(" + ", ".join([f"[{param_name},{param_type}, {param_default}]" for param_name, param_type, param_default in signature]) + ")"
                        docstring = inspect.getdoc(obj)
                        docstring_summary = docstring.split('\n')[0] if docstring else ""
                        functions[name] = {
                            "signature": signature,
                            "docstring": docstring,
                            "summary": signature_summary + ": " + docstring_summary
                        }
                    except ValueError:
                        # Skip functions for which a signature cannot be retrieved
                        pass
            classes = {}
            for name, obj in inspect.getmembers(module, inspect.isclass):
                # Ignore private classes (names starting with an underscore)
                if not name.startswith('_'):
                    class_methods = {}
                    for method_name, method_obj in inspect.getmembers(obj, inspect.isfunction):
                        # Ignore private methods (names starting with an underscore)
                        if not method_name.startswith('_'):
                            try:
                                signature = [(param_name, param.annotation.__name__ if hasattr(param.annotation, '__name__') and param.annotation != inspect.Parameter.empty else "", "required" if param.default is inspect.Parameter.empty else "") for param_name, param in inspect.signature(method_obj).parameters.items()]
                                # create a signature summary by converting into a string
                                signature_summary = method_name + "(" + ", ".join([f"[{param_name},{param_type}, {param_default}]" for param_name, param_type, param_default in signature]) + ")"
                                docstring = inspect.getdoc(method_obj)
                                docstring_summary = docstring.split('\n')[0] if docstring else ""
                                class_methods[method_name] = {
                                    "signature": signature,
                                    "docstring": docstring,
                                    "summary": signature_summary + ": " + docstring_summary
                                }
                            except ValueError:
                                # Skip methods for which a signature cannot be retrieved
                                pass
                    # Class summary: one line per method summary.
                    summary = ""
                    for _, method_info in class_methods.items():
                        summary += method_info["summary"] + "\n"
                    classes[name] = {
                        'methods' : class_methods,
                        'summary' : summary
                    }
            # Module summary: a numbered list of the module's function summaries.
            summary = "\n"
            num = 1
            for _, function_info in functions.items():
                function_summary = function_info["summary"]
                summary += f"{num}. {function_summary} \n"
                num += 1
            module_info[module_name] = {
                'functions': functions,
                'classes': classes,
                'summary': summary
            }
        except ImportError:
            print(f"Warning: Unable to import module '{module_name}'.")
    # Global variables are serialized as newline-separated "name=value" pairs.
    global_variables_str = '\n'.join([f'{key}={value}' for key, value in global_variables.items()])
    # Convert the module information, global variables, defined functions, and defined classes to a JSON array
    output = {
        "module_info": module_info,
        "global_variables": global_variables_str,
        "defined_functions": defined_functions,
        "defined_classes": defined_classes
    }
    json_output = json.dumps(output, indent=4)
    # json_output = json.dumps(output['module_info']['pinecone']['classes']['Index'], indent=4)
    # Print the JSON array
    print(json_output)
# Template placeholder: the host substitutes a JSON request payload for
# {{requestJson}} before this script is executed.
requestJson = """
{{requestJson}}
"""
if __name__ == '__main__':
    # Sample code for testing
    # NOTE: requestJson must have been substituted with real JSON containing a
    # "notebook" key; with the raw {{requestJson}} placeholder json.loads fails.
    request = json.loads(requestJson)
    notebook = request["notebook"]
    analyze_code(notebook)
import ast
import astunparse # Import the astunparse module
import inspect
import json
import warnings
from tqdm import TqdmExperimentalWarning
# Filter out the TqdmExperimentalWarning
warnings.filterwarnings("ignore", category=TqdmExperimentalWarning)
# Template placeholder: the host substitutes a JSON request payload for
# {{requestJson}} before this script is executed.
requestJson = """
{{requestJson}}
"""
def analyze_code(code):
    """Statically analyze Python source and print imported-module info as JSON.

    Parses *code* with the ``ast`` module to collect the signatures,
    docstrings, and call sites of locally defined functions (gathered for
    later use but not printed) together with the names of all imported
    modules. Each imported module is then loaded and introspected with
    ``inspect``, and a JSON mapping of
    ``module -> {function: {signature, docstring}}`` is printed to stdout.

    NOTE: importing a module executes its top-level code; modules that are
    not installed are reported with a warning and skipped.
    """
    # Parse the code using the ast module
    tree = ast.parse(code)

    # Define a custom AST visitor to collect function signatures and calls
    class FunctionInfoCollector(ast.NodeVisitor):
        def visit_FunctionDef(self, node):
            # Defaults apply to the trailing positional parameters, so pad the
            # front with None to align each parameter with its default (if
            # any). ast.arg itself carries no ``default`` attribute — defaults
            # live in node.args.defaults.
            padding = [None] * (len(node.args.args) - len(node.args.defaults))
            defaults = padding + list(node.args.defaults)
            # Collect (name, annotation source or "", has-no-default) triples.
            # Unannotated parameters have ``annotation is None``.
            arg_info = [
                (
                    arg.arg,
                    astunparse.unparse(arg.annotation).strip() if arg.annotation is not None else "",
                    default is None,
                )
                for arg, default in zip(node.args.args, defaults)
            ]
            function_signatures[node.name] = arg_info
            # Collect the docstring for the function
            function_docstrings[node.name] = ast.get_docstring(node)
            self.generic_visit(node)

        def visit_Call(self, node):
            # Collect function call information (plain-name calls only;
            # attribute calls such as obj.method() have no ``id``).
            if hasattr(node.func, 'id'):
                function_name = node.func.id
                function_calls[function_name] = node
            self.generic_visit(node)

        def visit_Import(self, node):
            # Collect imported modules
            for alias in node.names:
                imported_modules.append(alias.name)
            self.generic_visit(node)

        def visit_ImportFrom(self, node):
            # Collect imported modules. node.module is None for relative
            # imports (`from . import x`); skip those — __import__(None) fails.
            if node.module:
                imported_modules.append(node.module)
            self.generic_visit(node)

    # Use the custom AST visitor to collect function signatures, calls, and docstrings
    function_signatures = {}
    function_docstrings = {}
    function_calls = {}
    imported_modules = []
    collector = FunctionInfoCollector()
    collector.visit(tree)

    # Load imported modules and retrieve their function signatures
    module_info = {}
    for module_name in imported_modules:
        try:
            module = __import__(module_name)
            functions = {}
            for name, obj in inspect.getmembers(module, inspect.isfunction):
                try:
                    # Each entry: (param name, annotation name or "", "required" or "").
                    # hasattr guards against annotations (e.g. typing generics)
                    # that lack __name__.
                    signature = [(param_name, param.annotation.__name__ if hasattr(param.annotation, '__name__') and param.annotation != inspect.Parameter.empty else "", "required" if param.default is inspect.Parameter.empty else "") for param_name, param in inspect.signature(obj).parameters.items()]
                    docstring = inspect.getdoc(obj)
                    functions[name] = {
                        "signature": signature,
                        "docstring": docstring
                    }
                except ValueError:
                    # Skip functions for which a signature cannot be retrieved
                    pass
            module_info[module_name] = functions
        except ImportError:
            print(f"Warning: Unable to import module '{module_name}'.")

    # Convert the module information to a JSON array
    json_output = json.dumps(module_info, indent=4)

    # Print the JSON array
    print(json_output)
if __name__ == '__main__':
    # Decode the injected request payload.  The real analysis call is left
    # commented out; this branch currently only emits a smoke-test string.
    request = json.loads(requestJson)
    print(json.dumps("hello world"))
    # print(json.dumps(request))
    # Sample code for testing
    # analyze_code(notebook)
<p align="center"><img width="70%" src="docs/source/\_static/img/parlai.png" /></p>
--------------------------------------------------------------------------------
ParlAI (pronounced “par-lay”) is a framework for dialog AI research, implemented in Python.
Its goal is to provide researchers:
- a unified framework for sharing, training and testing dialog models
- many popular datasets available all in one place, with the ability to multi-task over them
- seamless integration of [Amazon Mechanical Turk](https://www.mturk.com/mturk/welcome) for data collection and human evaluation
- integration with [Facebook Messenger](http://www.parl.ai/static/docs/tutorial_messenger.html) to connect agents with humans in a chat interface
Many [tasks](https://github.com/facebookresearch/ParlAI/blob/master/parlai/tasks/task_list.py) are supported, including popular datasets such as [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/), [bAbI tasks](https://arxiv.org/abs/1502.05698), [MS MARCO](http://www.msmarco.org/), [MCTest](https://www.microsoft.com/en-us/research/publication/mctest-challenge-dataset-open-domain-machine-comprehension-text/), [WikiQA](https://www.microsoft.com/en-us/download/details.aspx?id=52419), [WebQuestions](http://www.aclweb.org/anthology/D13-1160), [SimpleQuestions](https://arxiv.org/abs/1506.02075), [WikiMovies](https://arxiv.org/abs/1606.03126), [QACNN & QADailyMail](https://arxiv.org/abs/1506.03340), [CBT](https://arxiv.org/abs/1511.02301), [BookTest](https://arxiv.org/abs/1610.00956), [bAbI Dialog tasks](https://arxiv.org/abs/1605.07683), [Ubuntu Dialog](https://arxiv.org/abs/1506.08909), [OpenSubtitles](http://opus.lingfil.uu.se/OpenSubtitles.php), [Cornell Movie](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html), [VQA-COCO2014](http://visualqa.org/), [VisDial](https://arxiv.org/abs/1611.08669) and [CLEVR](http://cs.stanford.edu/people/jcjohns/clevr/). See [here](https://github.com/facebookresearch/ParlAI/blob/master/parlai/tasks/task_list.py) for the current complete task list.
Included are examples of training neural models with [PyTorch](http://pytorch.org/) and [Lua Torch](http://torch.ch/), with batch training on GPU or hogwild training on CPUs. Using [Tensorflow](https://www.tensorflow.org/) instead is also straightforward.
Our aim is for the number of tasks and agents that train on them to grow in a community-based way.
ParlAI is described in the following paper:
[“ParlAI: A Dialog Research Software Platform", arXiv:1705.06476](https://arxiv.org/abs/1705.06476).
We are in an early-release Beta. Expect some adventures and rough edges.<br>
See the [news page](https://github.com/facebookresearch/ParlAI/blob/master/NEWS.md) for the latest additions & updates, and the website [http://parl.ai](http://parl.ai) for further docs.
## Goals
Unified framework for evaluation of dialogue models
- downloads tasks/datasets on demand and provides the same simple interface to them
- unifies dataset input and evaluation frameworks/metrics
- `agents/` directory encourages researchers to submit their training code to the repository to share with others
- aids reproducibility
End goal is general dialogue, which includes many different skills
- seamlessly combines simulated and real language tasks
- encourages multi-task model development & evaluation
- helps to reduce overfitting of models to specific datasets
End goal is real dialogue with people
- train and evaluate on live dialogue with humans via Mechanical Turk
- easy setup for connecting turkers with your dialogue agent
- allow to compare different research groups turk experiments
Set of datasets to bootstrap a working dialogue model for human interaction
- motivates building new datasets that will go in the repository
## Properties
- All datasets look like natural dialogue: a single format / API.
- Both fixed datasets (conversation logs) and interactive (online/RL) tasks.
- Both real and simulated tasks.
- Supports other media, e.g. visual in VQA.
- Can use Mechanical Turk to run / collect data / evaluate.
- Python framework.
- Examples of training with PyTorch.
- Uses zmq to talk to other toolboxes not in Python, examples of Lua Torch given.
- Supports batch and hogwild training and evaluation of models.
## Basic Examples
Note: If any of these examples fail, check the [requirements section](#requirements) to see if you have missed something.
Display 10 random examples from task 1 of the "1k training examples" bAbI task:
```bash
python examples/display_data.py -t babi:task1k:1
```
Displays 100 random examples from multi-tasking on the bAbI task and the SQuAD dataset at the same time:
```bash
python examples/display_data.py -t babi:task1k:1,squad -ne 100
```
Evaluate on the bAbI test set with a human agent (using the local keyboard as input):
```bash
python examples/eval_model.py -m local_human -t babi:Task1k:1 -dt valid
```
Evaluate an IR baseline model on the validation set of the Movies Subreddit dataset:
```bash
python examples/eval_model.py -m ir_baseline -t "#moviedd-reddit" -dt valid
```
Display the predictions of that same IR baseline model:
```bash
python examples/display_model.py -m ir_baseline -t "#moviedd-reddit" -dt valid
```
Train a seq2seq model on the "10k training examples" bAbI task 1 with batch size of 32 examples until accuracy reaches 95% on validation (requires pytorch):
```bash
python examples/train_model.py -t babi:task10k:1 -m seq2seq -mf /tmp/model_s2s -bs 32 -vtim 30 -vcut 0.95
```
Trains an attentive LSTM model on the SQuAD dataset with a batch size of 32 examples (pytorch and regex):
```bash
python examples/train_model.py -m drqa -t squad -bs 32 -mf /tmp/model_drqa
```
Tests an existing attentive LSTM model (DrQA reader) on the SQuAD dataset from our model zoo:
```bash
python examples/eval_model.py -t squad -mf "models:drqa/squad/model"
```
## Requirements
ParlAI currently requires Python3.
Dependencies of the core modules are listed in requirements.txt.
Some models included (in parlai/agents) have additional requirements.
## Installing ParlAI
Run the following commands to clone the repository and install ParlAI:
```bash
git clone https://github.com/facebookresearch/ParlAI.git ~/ParlAI
cd ~/ParlAI; python setup.py develop
```
This will link the cloned directory to your site-packages.
This is the recommended installation procedure, as it provides ready access to the examples and allows you to modify anything you might need. This is especially useful if you want to submit another task to the repository.
All needed data will be downloaded to ~/ParlAI/data, and any non-data files (such as the MemNN code) if requested will be downloaded to ~/ParlAI/downloads. If you need to clear out the space used by these files, you can safely delete these directories and any files needed will be downloaded again.
## Worlds, agents and teachers
The main concepts (classes) in ParlAI:
- world - defines the environment (can be very simple, just two agents talking to each other).
- agent – an agent in the world, e.g. the learner. (There can be multiple learners.)
- teacher – a type of agent that talks to the learner, implements one of the tasks
listed before.
After defining a world and the agents in it, a main loop can be run for training, testing or displaying, which calls the function world.parley(). The skeleton of an example main is given in the left panel, and the actual code for parley() on the right.
<p align=center><img width="100%" src="docs/source/\_static/img/main.png" /></p>
## Actions and Observations
All agents (including teachers) speak to each other with a single format -- the observation/action object (a python dict).
This is used to pass text, labels, rewards, and more between agents.
It’s the same object type when talking (acting) or listening (observing), but a different view (i.e. with different values in the fields).
The observation/action dict fields are as follows (or see [the documentation](http://parl.ai/static/docs/observations.html)):
<p align=center><img width="33%" src="docs/source/\_static/img/act-obs-dict.png" /></p>
Each of these fields are technically optional, depending on your dataset, though the 'text' field will most likely be used in nearly all exchanges.
Note: during validation and testing, the labels field is renamed eval_labels – this way, the model won’t accidentally train on the labels, but they are still available for calculating model-side loss.
For a fixed supervised learning dataset like bAbI, a typical exchange from the training set might be as follows (the test set would not include labels):
```python
Teacher: {
'text': 'Sam went to the kitchen\nPat gave Sam the milk\nWhere is the milk?',
'labels': ['kitchen'],
'label_candidates': ['hallway', 'kitchen', 'bathroom'],
'episode_done': False
}
Student: {
'text': 'hallway'
}
Teacher: {
'text': 'Sam went to the hallway\nPat went to the bathroom\nWhere is the milk?',
'labels': ['hallway'],
'label_candidates': ['hallway', 'kitchen', 'bathroom'],
'episode_done': True
}
Student: {
'text': 'hallway'
}
Teacher: {
... # starts next episode
}
...
```
## Code
The code is set up into several main directories:
- **core**: contains the primary code for the framework
- **agents**: contains agents which can interact with the different tasks (e.g. machine learning models)
- **examples**: contains a few basic examples of different loops (building dictionary, train/eval, displaying data)
- **tasks**: contains code for the different tasks available from within ParlAI
- **mturk**: contains code for setting up Mechanical Turk, as well as sample MTurk tasks
- **messenger**: contains code for interfacing with Facebook Messenger
- **zoo**: contains code to directly download and use pretrained models from our model zoo
Each directory is described in more detail below, ordered by dependencies.
### Core
The core library contains the following files:
- **agents.py**: this file contains a few basic agents which can be extended by your own model
- **_Agent_**: base class for all other agents, implements the act() method which receives an observation table and returns a table in response
- **_Teacher_**: child of Agent, also implements the report method for returning metrics. Tasks implement the Teacher class
- **_MultiTaskTeacher_**: creates a set of teachers based on a "task string" passed to the Teacher, creating multiple teachers within it and alternating between them
- create_task_teacher: instantiate a teacher from a given task string (e.g. 'babi:task:1' or 'squad')
- **build_data.py**: basic utilities for setting up data for tasks. you can override if your filesystem needs different functionality.
- **dict.py**: contains code for building general NLP-style dictionaries from observations
- DictionaryAgent: agent which tracks the index and frequency of words in a dictionary, and can parse a sentence into indices into its dictionary or back
- **metrics.py**: computes evaluation metrics for dialog, e.g. ranking metrics, etc.
- **params.py**: uses argparse to interpret command line arguments for ParlAI
- **teachers.py**: contains teachers that deal with dialog-based tasks, as well as data classes for storing data
- **_FixedDialogTeacher_**: base class for a teacher that utilizes fixed data
- **_DialogTeacher_**: base class for a teacher doing dialog with fixed chat logs
- **_FbDialogTeacher_**: a teacher that implements a function `setup_data` that parses data in the FB Dialog data format
- **thread_utils.py**: utility classes/functions for use in Hogwild multithreading (multiprocessing)
- SharedTable: provides a lock-protected, shared-memory, dictionary-like interface for keeping track of metrics
- **worlds.py**: contains a set of basic worlds for tasks to take place inside
- **_World_**: base class for all other worlds, implements `parley`, `shutdown`, `__enter__`, and `__exit__`
- **_DialogPartnerWorld_**: default world for turn-based two-agent communication
- **_MultiAgentDialogWorld_**: round-robin turn-based agent communication for two or more agents
- **_HogwildWorld_**: default world for setting up a separate world for every thread when using multiple threads (processes)
### Agents
The agents directory contains agents that have been approved into the ParlAI framework for shared use.
We encourage you to contribute new ones!
Some agents currently available within [this directory](https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents):
- **drqa**: an attentive [LSTM model DrQA](https://arxiv.org/abs/1704.00051) implemented in PyTorch that has competitive results on the SQuAD dataset amongst others.
- **fairseq**: [an attentive sequence to sequence model using convolutions](https://arxiv.org/abs/1705.03122)
- **seq2seq**: a generic seq2seq model with various options
- **ibm_seq2seq**: IBM sequence to sequence model
- **language_model**: an RNN language model
- **memnn**: code for an end-to-end memory network
- **mlb_vqa**: a visual question answering model based on [this paper](https://arxiv.org/abs/1610.04325)
- **starspace**: a simple supervised embedding approach which is a strong baseline based on [this paper](https://arxiv.org/abs/1709.03856).
- **tfidf_retriever** a simple retrieval based model, also useful as a first step for retrieving information as input to another model.
- **ir_baseline**: simple information retrieval baseline that scores candidate responses with [TFIDF-weighted](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) matching
- **repeat_label**: basic class for merely repeating all data sent to it (e.g. for piping to a file, debugging)
- **remote_agent**: basic class for any agent connecting over ZMQ
- **local_human**: takes input from the keyboard as the act() function of the agent, so a human can act in the environment
See the [directory](https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents) for the complete list.
### Examples
[This directory](https://github.com/facebookresearch/ParlAI/tree/master/examples) contains a few particular examples of basic loops.
- base_train.py: _very simple example shows the outline of a training/validation loop using the default Agent parent class_
- display_data.py: _uses agent.repeat_label to display data from a particular task provided on the command-line_
- display_model.py: _shows the predictions of a provided model on a particular task provided on the command-line_
- eval_model.py: _uses the named agent to compute evaluation metrics data for a particular task provided on the command-line_
- build_dict.py: _build a dictionary from a particular task provided on the command-line using core.dict.DictionaryAgent_
### Tasks
Our first release included the following datasets (shown in the left panel), and accessing one of them is as simple as specifying the name of the task as a command line option, as shown in the dataset display utility (right panel):
<p align=center><img width="100%" src="docs/source/\_static/img/tasks2.png" /></p>
Over 20 tasks were supported in the first release, including popular datasets such as
SQuAD, bAbI tasks, MCTest, WikiQA, WebQuestions, SimpleQuestions, WikiMovies, QACNN, QADailyMail, CBT, BookTest, bAbI Dialog tasks,
Ubuntu, OpenSubtitles, Cornell Movie, VQA-COCO2014.
Since then, several datasets have been added such as VQAv2, VisDial, MNIST_QA, Personalized Dialog, InsuranceQA, MS MARCO, TriviaQA, and CLEVR. See [here](https://github.com/facebookresearch/ParlAI/blob/master/parlai/tasks/task_list.py) for the current complete task list.
Choosing a task in ParlAI is as easy as specifying it on the command line, as shown in the above image (right). If the dataset has not been used before, ParlAI will automatically download it. As all datasets are treated in the same way in ParlAI (with a single dialog API), a dialog agent can in principle switch training and testing between any of them. Even better, one can specify many tasks at once (multi-tasking) by simply providing a comma-separated list, e.g. the command line “-t babi,squad”, to use those two datasets, or even all the QA datasets at once (-t #qa) or indeed every task in ParlAI at once (-t #all). The aim is to make it easy to build and evaluate very rich dialog models.
Each task folder contains:
- **build.py** file for setting up data for the task (downloading data, etc, only done the first time requested, and not downloaded if the task is not used).
- **agents.py** file which contains default or special teacher classes used by core.create_task to instantiate these classes from command-line arguments (if desired).
- **worlds.py** file can optionally be added for tasks that need to define new/complex environments.
To add your own task, see the [tutorial](http://www.parl.ai/static/docs/tutorial_task.html).
### MTurk
An important part of ParlAI is seamless integration with Mechanical Turk for data collection, training and evaluation.
Human Turkers are also viewed as agents in ParlAI and hence person-person, person-bot, or multiple people and bots in group chat can all converse within the standard framework, switching out the roles as desired with no code changes to the agents. This is because Turkers also receive and send via a (pretty printed) version of the same interface, using the fields of the observation/action dict.
We currently provide three examples: collecting data, human evaluation of a bot, and round-robin chat between local humans and remote Turkers.
<p align=center><img width="100%" src="docs/source/\_static/img/mturk.png" /></p>
The mturk library contains the following directories:
- **core**: this directory contains the core code for setting up AWS backend that supports the MTurk chat interface, code for HIT creation and approval, and the wrapper class `MTurkAgent` which encapsulates the MTurk interface into a standard `Agent` class.
- **tasks**: this directory contains three sample MTurk tasks.
- **_qa\_data\_collection_**: get questions and answers from turkers, given a random paragraph from SQuAD.
- **_model\_evaluator_**: ask turkers to evaluate the information retrieval baseline model on the Reddit movie dialog dataset.
- **_multi\_agent\_dialog_**: round-robin chat between two local human agents and two Turkers.
To run an MTurk task:
- Go into the directory for the task you want to run.
- Run `python run.py -nh <num_hits> -na <num_assignments> -r <reward> [--sandbox]/[--live]`, with `<num_hits>`, `<num_assignments>` and `<reward>` set appropriately. Use `--sandbox` to run the task in MTurk sandbox mode before pushing it live.
To add your own MTurk task:
- create a new folder within the mturk/tasks directory for your new task
- implement __task\_config.py__, with at least the following fields in the `task_config` dictionary:
- `hit_title`: a short and descriptive title about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
- `hit_description`: a description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens.
- `hit_keywords`: one or more words or phrases that describe the HIT, separated by commas. On MTurk website, these words are used in searches to find HITs.
- `task_description`: a detailed task description that will be shown on the HIT task preview page and on the left side of the chat page. Supports HTML formatting.
- implement __run.py__, with code for setting up and running the world where `MTurkAgent` lives in.
- (Optional) implement __worlds.py__, with a world class that extends from `World`.
Please see [the MTurk tutorial](http://parl.ai/static/docs/mturk.html) to learn more about the MTurk examples and how to create and run your own task.
### Messenger
Please see [the Facebook Messenger tutorial](http://parl.ai/static/docs/tutorial_messenger.html) to learn more about how to use ParlAI with Facebook Messenger.
## Support
If you have any questions, bug reports or feature requests, please don't hesitate to post on our [Github Issues page](https://github.com/facebookresearch/ParlAI/issues).
## The Team
ParlAI is currently maintained by Emily Dinan, Alexander H. Miller, Stephen Roller, Kurt Shuster, Jack Urbanek and Jason Weston.
A non-exhaustive list of other major contributors includes:
Will Feng, Adam Fisch, Jiasen Lu, Antoine Bordes, Devi Parikh, Dhruv Batra,
Filipe de Avila Belbute Peres and Chao Pan.
## Citation
Please cite the [arXiv paper](https://arxiv.org/abs/1705.06476) if you use ParlAI in your work:
```
@article{miller2017parlai,
title={ParlAI: A Dialog Research Software Platform},
author={{Miller}, A.~H. and {Feng}, W. and {Fisch}, A. and {Lu}, J. and {Batra}, D. and {Bordes}, A. and {Parikh}, D. and {Weston}, J.},
journal={arXiv preprint arXiv:{1705.06476}},
year={2017}
}
```
## License
ParlAI is BSD-licensed. We also provide an additional patent grant.
| /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/README.md | 0.46563 | 0.987735 | README.md | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.worlds import validate
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
import parlai.mturk.core.mturk_utils as mturk_utils
import random
class QualificationFlowOnboardWorld(MTurkOnboardWorld):
    """Onboarding world for the qualification-flow demo.

    Sends one system message explaining how qualification filtering works,
    waits for any reply from the worker, then marks the episode done.
    """

    def parley(self):
        intro = (
            'This demo displays the functionality of using qualifications to '
            'filter the workers who are able to do your tasks. The first task '
            'you will get will check to see if you pass the bar that the task '
            'requires against a prepared test set. If you pass, the next task '
            'will be a real one rather than the test one.'
            '\n'
            'Send anything to get started.'
        )
        self.mturk_agent.observe({'id': 'System', 'text': intro})
        self.mturk_agent.act()
        self.episodeDone = True
class QualificationFlowSoloWorld(MTurkTaskWorld):
    """
    World that asks a user 5 math questions, first from a test set if the user
    is entering for the first time, and then randomly for all subsequent times.

    Users who don't get enough correct in the test set are assigned a
    qualification that blocks them from completing more HITs during shutdown.

    Demos functionality of filtering workers with just one running world.
    Similar results could be achieved by using two worlds where the first acts
    as just a filter and gives either a passing or failing qualification. The
    second would require the passing qualification. The first world could then
    be runnable using the --unique flag.
    """

    # Fixed question/answer pairs served to first-time workers; passing this
    # set is what earns (or denies) the blocking qualification.
    test_set = [
        ['What is 1+1?', '2'],
        ['What is 3+2?', '5'],
        ['What is 6+6?', '12'],
        ['What is 5-3?', '2'],
        ['What is 6*4?', '24'],
    ]

    collector_agent_id = 'System'

    def __init__(self, opt, mturk_agent, qualification_id, firstTime):
        """
        Args:
            opt: ParlAI options dict (read here for 'is_sandbox').
            mturk_agent: the MTurk worker agent for this HIT.
            qualification_id: id of the blocking qualification assigned on
                failure during shutdown().
            firstTime: True when the worker has not attempted the task
                before, in which case the fixed test_set is used instead of
                random questions.
        """
        self.mturk_agent = mturk_agent
        self.firstTime = firstTime
        if not firstTime:
            self.questions = self.generate_questions(5)
        else:
            self.questions = self.test_set
        self.episodeDone = False
        self.correct = 0
        self.curr_question = 0
        self.qualification_id = qualification_id
        self.opt = opt

    def generate_questions(self, num):
        """Return `num` random addition questions as [question, answer] pairs."""
        questions = []
        for _ in range(num):
            num1 = random.randint(1, 20)
            num2 = random.randint(3, 16)
            questions.append([
                'What is {} + {}?'.format(num1, num2),
                '{}'.format(num1 + num2)
            ])
        return questions

    def parley(self):
        """Ask the next question, or thank the worker once all are answered."""
        if self.curr_question == len(self.questions):
            ad = {
                'episode_done': True,
                'id': self.__class__.collector_agent_id,
                'text': 'Thank you for your answers!',
            }
            self.mturk_agent.observe(validate(ad))
            self.episodeDone = True
        else:
            ad = {
                'episode_done': True,
                'id': self.__class__.collector_agent_id,
                'text': self.questions[self.curr_question][0],
            }
            self.mturk_agent.observe(validate(ad))
            answer = self.mturk_agent.act()
            # BUG FIX: act() returns an observation/action dict, so the
            # worker's reply is in its 'text' field.  The previous code
            # compared the whole dict against the answer string, which can
            # never be equal, so no worker was ever counted correct.
            if answer.get('text') == self.questions[self.curr_question][1]:
                self.correct += 1
            self.curr_question += 1

    def episode_done(self):
        return self.episodeDone

    def report(self):
        # No metrics are collected for this demo task.
        pass

    def shutdown(self):
        """
        Here is where the filtering occurs. If a worker hasn't successfully
        answered all the questions correctly, they are given the qualification
        that marks that they should be blocked from this task.
        """
        if self.firstTime and self.correct != len(self.questions):
            mturk_utils.give_worker_qualification(
                self.mturk_agent.worker_id,
                self.qualification_id,
                is_sandbox=self.opt['is_sandbox'],
            )
        self.mturk_agent.shutdown()

    def review_work(self):
        # No manual review step for this demo.
        pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.worlds import validate
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
class QADataCollectionOnboardWorld(MTurkOnboardWorld):
    """Example onboarding world.

    Sends a single welcome message from the world to the worker, waits for
    the worker's reply, and then exits as complete.
    """

    def parley(self):
        welcome = {'id': 'System', 'text': 'Welcome onboard!'}
        self.mturk_agent.observe(welcome)
        self.mturk_agent.act()
        self.episodeDone = True
class QADataCollectionWorld(MTurkTaskWorld):
    """
    World for recording a turker's question and answer given a context.
    Assumes the context is a random context from a given task, e.g.
    from SQuAD, CBT, etc.
    """

    collector_agent_id = 'QA Collector'

    def __init__(self, opt, task, mturk_agent):
        self.task = task
        self.mturk_agent = mturk_agent
        self.episodeDone = False
        # Alternates 0 -> 1; start at -1 so the first parley() is turn 0.
        self.turn_index = -1
        self.context = None
        self.question = None
        self.answer = None

    def parley(self):
        # Each turn starts from the QA Collector agent.
        self.turn_index = (self.turn_index + 1) % 2
        ad = {
            'episode_done': False,
            'id': self.__class__.collector_agent_id,
        }
        if self.turn_index == 0:
            # First turn: show a context drawn from the underlying task
            # (e.g. the SQuAD teacher) and prompt the turker for a question.
            qa = self.task.act()
            self.context = '\n'.join(qa['text'].split('\n')[:-1])
            # Wrap the context with a prompt telling the turker what to do.
            ad['text'] = (self.context +
                          '\n\nPlease provide a question given this context.')
            self.mturk_agent.observe(validate(ad))
            self.question = self.mturk_agent.act()
            # The turker's question could be logged here.
        else:
            # Second turn: collect the answer to the turker's own question.
            ad['text'] = 'Thanks. And what is the answer to your question?'
            ad['episode_done'] = True  # end of episode
            self.mturk_agent.observe(validate(ad))
            self.answer = self.mturk_agent.act()
            self.episodeDone = True

    def episode_done(self):
        return self.episodeDone

    def shutdown(self):
        self.task.shutdown()
        self.mturk_agent.shutdown()

    def review_work(self):
        # The collected work could be accepted or rejected here.
        pass

    def get_custom_task_data(self):
        # Bundle the important task data together so it can later be used for
        # creating the dataset. Anything that requires pickling would go in a
        # field called 'needs-pickle'.
        return {
            'context': self.context,
            'acts': [self.question, self.answer],
        }
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Configuration consumed by the ParlAI MTurk runner; each key below is read
# when the HIT is created on Amazon Mechanical Turk.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Ask and answer a question about a paragraph'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'Ask and answer a question about a paragraph.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,question,answer'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
task_config['task_description'] = \
'''\'\'\'
In this task, you will need to ask a question about a paragraph, and then provide your own answer to it.<br><br>
Example:<br><br>
------------------- Task Begin ------------------- <br><br>
<b>QA Collector</b>:<br>
New Haven\'s greatest culinary claim to fame may be its pizza, which has been claimed to be among the best in the country, or even in the world. New Haven-style pizza, called "apizza" (pronounced ah-BEETS, [a'pitts] in the original Italian dialect), made its debut at the iconic Frank Pepe Pizzeria Napoletana (known as Pepe\'s) in 1925. Apizza is baked in coal- or wood-fired brick ovens, and is notable for its thin crust. Apizza may be red (with a tomato-based sauce) or white (with a sauce of garlic and olive oil), and pies ordered "plain" are made without the otherwise customary mozzarella cheese (originally smoked mozzarella, known as "scamorza" in Italian). A white clam pie is a well-known specialty of the restaurants on Wooster Street in the Little Italy section of New Haven, including Pepe\'s and Sally\'s Apizza (which opened in 1938). Modern Apizza on State Street, which opened in 1934, is also well-known.<br><br>Please provide a question given this context.<br><br>
<b>Worker</b>:<br>
What is apizza baked in?<br><br>
<b>QA Collector</b>:<br>
Thanks. And what is the answer to your question?<br><br>
<b>Worker</b>:<br>
It's baked in coal- or wood-fired brick ovens.<br><br>
------------------- Task Done ------------------- <br><br>
If you are ready, please click "Accept HIT" to start this task.
\'\'\'''' | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/mturk/tasks/qa_data_collection/task_config.py | 0.698124 | 0.568895 | task_config.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Configuration consumed by the ParlAI MTurk runner; each key below is read
# when the HIT is created on Amazon Mechanical Turk.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Give a rating to a dialog between two people'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'Give a rating to a dialog between two people.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog,rating'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
task_config['task_description'] = \
'''\'\'\'
In this task, you are going to read a dialog between two people, and you will need to give a rating on how good the response is.<br><br>
Example:<br><br>
------------------- Task Begin ------------------- <br><br>
<b>Model Evaluator</b>:<br>
This is the author of the article . These were my picks and it 's an opinion . I did say Quantum was mediocre to bad and it 's because the trailer is so incredible and Casino Royale was so great that it was a let down . Also are you really gon na say Phantom Menace wasnt a terrible movie that had a great trailer .<br><br>
How would you rate the following response (from 0 to 10):<br><br>
True its an opinion as is my comment . I 'd say quantum of solace was meh , bland . But it had one of the best bond villains around . As for phantom menace , I 'd say it gets far more hate than it deserves . Did I personally enjoy it ? Yes . Was it a good movie ? Not especially . Did it live up to the hype ? God no ? Was it terrible ? Not even close . Attack of the clones on the other hand , that was dreck .<br><br>
<b>Worker</b>:<br>
8<br><br>
------------------- Task Done ------------------- <br><br>
If you are ready, please click "Accept HIT" to start this task.
\'\'\'''' | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/mturk/tasks/model_evaluator/task_config.py | 0.747339 | 0.471102 | task_config.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import json
import os
import random
import time
import copy
import numpy as np
import pickle
from joblib import Parallel, delayed
from parlai.core.worlds import MultiAgentDialogWorld
from parlai.mturk.core.agents import MTURK_DISCONNECT_MESSAGE
from parlai.mturk.core.worlds import MTurkOnboardWorld
def _agent_shutdown(agent, timeout):
    """Shut down a single MTurk agent.

    Module-level helper so it can be dispatched through joblib's
    threading backend in ``TalkTheWalkWorld.shutdown``.

    :param agent: MTurk agent to shut down.
    :param timeout: seconds to wait for the shutdown to complete.
    """
    agent.shutdown(timeout=timeout)
class TalkTheWalkWorld(MultiAgentDialogWorld):
"""A world where messages from agents can be interpreted as _actions_ in the
world which result in changes in the environment (are executed). Hence a
grounded simulation can be implemented rather than just dialogue between
agents.
"""
    def __init__(self, opt, agents=None, shared=None, world_tag='[NONE]'):
        """Set up a Talk The Walk game.

        :param opt: task options; keys read here include ``is_sandbox``,
            ``replay``, ``real_time``, ``replay_bot``, ``bot_type``,
            ``replay_log_file``, ``world_idx`` and ``start_idx``.
        :param agents: pair of MTurk agents; by convention index 0 is the
            Tourist and index 1 is the Guide.
        :param shared: shared state passed through to the parent class.
        :param world_tag: label used to tag log/save messages for this world.
        """
        super().__init__(opt, agents, shared)
        self.dir = os.path.dirname(os.path.abspath(__file__))
        self.task_type = 'sandbox' if opt['is_sandbox'] else 'live'
        self.world_tag = world_tag
        # Replay flags: when `replay` is set a previously logged dialog is
        # re-shown in the UI instead of running a live game.
        self.replay = opt.get('replay', False)
        self.real_time = opt.get('real_time', False)
        self.replay_bot = opt.get('replay_bot', False)
        self.bot_type = opt.get('bot_type', 'discrete')
        self.logs_file = opt.get('replay_log_file')
        # Messages with these exact texts are treated as movement actions
        # rather than chat messages (see is_action / update_location).
        self.actions = ["ACTION:TURNLEFT",
                        "ACTION:TURNRIGHT",
                        "ACTION:FORWARD"]
        # Locations are [x, y, orientation_index] triples.
        self.start_location = None
        self.location = None
        self.target_location = None
        self.orientations = ['N', 'E', 'S', 'W']
        self.neighborhoods = ['hellskitchen',
                              'williamsburg',
                              'fidi',
                              'eastvillage']
        # Per-neighborhood grid extents, in 2x2 blocks (see init_world).
        self.boundaries = {}
        self.boundaries['hellskitchen'] = [3, 3]
        self.boundaries['williamsburg'] = [2, 8]
        self.boundaries['eastvillage'] = [3, 4]
        self.boundaries['fidi'] = [2, 3]
        # (dx, dy) applied when moving forward while facing each direction.
        self.steps = {}
        self.steps['N'] = [0, 1]
        self.steps['E'] = [1, 0]
        self.steps['S'] = [0, -1]
        self.steps['W'] = [-1, 0]
        self.min_x, self.min_y, self.max_x, self.max_y = None, None, None, None
        self.landmarks = []
        self.neighborhood = None
        self.start_time = time.time()
        self.total_time = None
        # The Guide may trigger at most 3 location evaluations per episode.
        self.num_evaluations = 0
        self.round = 0
        self.status = None
        self.episodeDone = False
        self.acts = []
        if self.replay:
            self.load_data()
            self.load_world(opt['world_idx'])
            self.start_idx = opt.get('start_idx')
        else:
            self.init_world()
    def load_data(self):
        """Load the data for replaying a dialog"""
        # NOTE(review): recomputes the module directory even though
        # self.dir already holds the same value -- presumably historical.
        data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 self.logs_file)
        with open(data_path) as f:
            self.data = json.load(f)
def load_world(self, world_idx):
"""Loads a world into the task when replaying data"""
if world_idx == -1:
success_worlds = []
best_world_len = 1000
best_world_idx = 1000
for world_idx in range(len(self.data)):
if not self.replay_bot:
if 40 < len(self.data[world_idx]['dialog']) < 120:
break
else:
world = copy.deepcopy(self.data[world_idx])
is_success, length = self.is_world_success(world)
if is_success:
success_worlds.append((world_idx, length))
if len(world['dialog']) < best_world_len:
best_world_idx = world_idx
best_world_len = length
print(success_worlds)
world_idx = best_world_idx
print(world_idx, len(self.data[world_idx]['dialog']))
self.world_idx = world_idx
world = self.data[world_idx]
self.loaded_world = world
self.neighborhood = world['neighborhood']
self.target_location = world['target_location']
self.start_location = world['start_location']
self.location = self.start_location
self.landmarks = world['landmarks']
self.replay_acts = world['dialog']
self.boundaries = world['boundaries']
self.min_x, self.min_y, self.max_x, self.max_y = self.boundaries
self.send_location(self.agents[0])
self.send_map(self.agents[1])
    def init_world(self):
        """Initializes a new world for the dialog"""
        # first sample neighborhood
        neighborhood_ind = random.randint(0, len(self.neighborhoods) - 1)
        self.neighborhood = self.neighborhoods[neighborhood_ind]
        # Sample 2x2 grid in neighborhood
        self.min_x = random.randint(0, self.boundaries[self.neighborhood][0]) * 2
        self.min_y = random.randint(0, self.boundaries[self.neighborhood][1]) * 2
        self.max_x = self.min_x + 3
        self.max_y = self.min_y + 3
        # Random start pose and target pose inside the sampled sub-grid.
        self.location = [random.randint(self.min_x, self.max_x),
                         random.randint(self.min_y, self.max_y),
                         random.randint(0, 3)]  # x, y, orientation idx
        self.target_location = [random.randint(self.min_x, self.max_x),
                                random.randint(self.min_y, self.max_y),
                                random.randint(0, 3)]  # x, y, orientation idx
        # Copy, not alias: start_location must survive later moves.
        self.start_location = [self.location[0],
                               self.location[1],
                               self.location[2]]
        map_f = os.path.join(self.dir, '{}_map.json'.format(self.neighborhood))
        with open(map_f) as f:
            data = json.load(f)
        # Keep only landmarks inside the sampled sub-grid; landmark
        # coordinates are stored at half resolution, hence the * 2.
        for landmark in data:
            if (
                landmark['x'] * 2 >= self.min_x and
                landmark['x'] * 2 <= self.max_x and
                landmark['y'] * 2 >= self.min_y and
                landmark['y'] * 2 <= self.max_y
            ):
                self.landmarks.append(landmark)
        self.send_location(self.agents[0])
        self.send_map(self.agents[1])
def update_location(self, act):
"""Updates the tourist's location based on an action"""
if act == "ACTION:TURNLEFT":
self.location[2] = (self.location[2] - 1) % 4
if act == "ACTION:TURNRIGHT":
self.location[2] = (self.location[2] + 1) % 4
if act == "ACTION:FORWARD":
orientation = self.orientations[self.location[2]]
self.location[0] += self.steps[orientation][0]
self.location[1] += self.steps[orientation][1]
self.location[0] = max(min(self.location[0], self.max_x),
self.min_x)
self.location[1] = max(min(self.location[1], self.max_y),
self.min_y)
def send_location(self, agent):
"""Sends the current location to the given agent"""
msg = {'id': "WORLD_LOCATION",
'text': {'location': self.location,
'boundaries': [self.min_x,
self.min_y,
self.max_x,
self.max_y],
'neighborhood': self.neighborhood}}
agent.observe(msg)
def send_map(self, agent):
"""Sends the world map to the given agent"""
msg = {'id': "WORLD_MAP",
'text': {'landmarks': self.landmarks,
'target': self.target_location,
'boundaries': [self.min_x,
self.min_y,
self.max_x,
self.max_y]}}
agent.observe(msg)
    def is_action(self, msg):
        """Returns whether a message is an action from the Tourist"""
        # Actions are exact-match strings such as "ACTION:FORWARD".
        return msg in self.actions
    def episode_done(self):
        """Return True once the episode has ended (success, failure,
        disconnect or timeout)."""
        return self.episodeDone
def timeout(self, agent):
self.status = 'timeout'
self.causal_agent_id = agent.id
msg = {'id': "WORLD_TIMEOUT",
'text': ''}
agent.observe(msg)
for other_agent in self.agents:
if other_agent.id != agent.id:
msg = {'id': 'WORLD_PARTNER_TIMEOUT',
'text': ''}
other_agent.observe(msg)
def is_world_success(self, world):
"""Determines whether a given world/dialog yielded a successful
run of the task. Used when loading a world from data for replay.
"""
target_location = world['target_location']
start_location = world['start_location']
location = start_location
replay_acts = world['dialog']
min_x, min_y, max_x, max_y = world['boundaries']
num_evaluations = 0
last_grid = None
def evaluate_location(num_evals, location, target):
if num_evals == 3:
return num_evals, False, True
num_evals += 1
return (num_evals,
(location[0] == target[0] and location[1] == target[1]),
False)
def update_location(act, loc, mi_x, ma_x, mi_y, ma_y):
if act == "ACTION:TURNLEFT":
loc[2] = (loc[2] - 1) % 4
if act == "ACTION:TURNRIGHT":
loc[2] = (loc[2] + 1) % 4
if act == "ACTION:FORWARD":
orientation = self.orientations[loc[2]]
loc[0] += self.steps[orientation][0]
loc[1] += self.steps[orientation][1]
loc[0] = max(min(loc[0], ma_x), mi_x)
loc[1] = max(min(loc[1], ma_y), mi_y)
return loc
for kk, act in enumerate(replay_acts):
if self.is_action(act['text']):
location = update_location(act['text'],
location,
min_x,
max_x,
min_y,
max_y)
elif act['text'] == 'EVALUATE_LOCATION':
num_evals, done, too_many = evaluate_location(num_evaluations,
location,
target_location)
if done:
max_prob = 0
max_i_j = None
for i in range(len(last_grid)):
for j in range(len(last_grid[i])):
if last_grid[i][j] > max_prob:
max_i_j = (i, j)
max_prob = last_grid[i][j]
if max_i_j != (location[0] - min_x, location[1] - min_y):
return False, -1
high_prob = any(any(k >= 0.50 for k in j)
for j in last_grid)
max_prob = max(max(j) for j in last_grid)
return (True and high_prob, kk)
elif too_many:
return False, -1
elif act['id'] == 'Guide':
last_grid = act['text']
return False, -1
def replay_actions(self):
"""Replays a loaded dialog in the mturk interface"""
tourist = self.agents[0]
guide = self.agents[1]
cur_time = None
actions = []
start = self.start_idx
time.sleep(5)
for i in range(start, len(self.replay_acts)):
act = self.replay_acts[i]
if self.real_time:
if cur_time is None:
cur_time = act['time']
else:
elapsed = act['time'] - cur_time
if not self.is_action(elapsed):
elapsed *= 0.75
if not self.real_time:
elapsed = min(elapsed, 2)
time.sleep(elapsed)
cur_time = act['time']
else:
time.sleep(2)
if self.is_action(act['text']):
self.update_location(act['text'])
act['id'] = 'ACTION'
tourist.observe(act)
act['id'] = 'Tourist'
actions.append(act)
continue
if act['text'] == 'EVALUATE_LOCATION':
done = self.evaluate_location()
if done:
self.episodeDone = True
return
else:
if self.replay_bot:
if act['id'] == 'Tourist' and self.bot_type != 'natural':
text = act['text']
act['text'] = text[:16]
elif act['id'] == 'Guide':
grid = act['text']
old_grid = np.array(grid)
sizes = [9, 19, 39]
for i in sizes:
new_grid = self.construct_expanded_array(old_grid, i)
old_grid = new_grid
act['attn_grid'] = new_grid[:37, :37].tolist()
act['attn_grid_size'] = sizes[-1] - 2
binary_grid = ''
mean = np.mean(np.array(grid))
for i in range(len(grid)):
for j in range(len(grid[i])):
num = int(grid[i][j] > mean)
binary_grid += str(num)
act['show_grid'] = True
act['text'] = binary_grid
guide.observe(act)
if 'attn_grid' in act:
act['attn_grid'] == []
tourist.observe(act)
self.episodeDone = True
    def construct_expanded_array(self, grid, size):
        """Constructing a larger attention grid when replaying actions.
        Used when displaying the heat map for the Guide.

        Returns a ``size`` x ``size`` array where the original ``grid``
        values sit at odd lattice points, the border is zeroed, and all
        remaining cells are interpolated from their neighbors.
        """
        new_grid = np.full((size, size), -1.0)  # -1.0 marks "not yet filled"
        new_grid = self.fill_initial(new_grid, grid, size)
        new_grid = self.fill_neighbors(new_grid, size)
        return new_grid
def neighbor_coords(self, cell, max_size):
x, y = cell
X = Y = max_size
return [(x2, y2) for x2 in range(x - 1, x + 2)
for y2 in range(y - 1, y + 2)
if (-1 < x < X and
-1 < y < Y and
(x != x2 or y != y2) and
(0 <= x2 < X) and
(0 <= y2 < Y))]
    def fill_initial(self, new_g, old_g, size):
        """Seed the expanded grid: zero the border, place ``old_g`` values
        at odd lattice points, then linearly interpolate between
        horizontally and vertically adjacent seeded values."""
        # Zero the top/bottom border rows...
        for i in (0, size - 1):
            for j in (range(size)):
                new_g[i, j] = 0
        # ...and the left/right border columns.
        for j in (0, size - 1):
            for i in range(size):
                new_g[i, j] = 0
        # Original values land on the odd lattice points.
        for i in range(1, size, 2):
            for j in range(1, size, 2):
                new_g[i, j] = old_g[(i - 1) // 2, (j - 1) // 2]
        # Horizontal interpolation between seeded points...
        for i in range(1, size - 1, 2):
            for j in range(2, size - 1, 2):
                new_g[i, j] = (new_g[i, j - 1] + new_g[i, j + 1]) / 2
        # ...then vertical interpolation.
        for i in range(2, size - 1, 2):
            for j in range(1, size - 1, 2):
                new_g[i, j] = (new_g[i - 1, j] + new_g[i + 1, j]) / 2
        return new_g
def fill_neighbors(self, grid, size):
for i in range(size):
for j in range(size):
if grid[i, j] == -1:
neighbors = self.neighbor_coords((i, j), size)
neighbor_sum = sum((grid[k, l] for k, l in neighbors))
grid[i, j] = neighbor_sum / len(neighbors)
return grid
    def parley(self):
        """Run the world: replay a logged dialog when in replay mode,
        otherwise poll both agents (non-blocking) and route messages
        between them until the episode ends."""
        if self.replay:
            self.replay_actions()
            return
        while True:
            # Tourist
            agent = self.agents[0]
            act = agent.act(blocking=False)
            if act:
                act['time'] = time.time()
                if act['text'] == MTURK_DISCONNECT_MESSAGE:
                    # episode_done=true so conversations ends
                    self.status = 'disconnect'
                    self.episodeDone = True
                    break
                if self.is_action(act['text']):
                    # Movement messages mutate world state before being
                    # forwarded to the Guide.
                    self.update_location(act['text'])
                self.acts.append(act)
                self.agents[1].observe(act)
            # Guide
            agent = self.agents[1]
            act = agent.act(blocking=False)
            if act:
                act['time'] = time.time()
                self.acts.append(act)
                if act['text'] == MTURK_DISCONNECT_MESSAGE:
                    # episode_done=true so conversations ends
                    self.status = 'disconnect'
                    self.episodeDone = True
                    break
                if act['text'] == 'EVALUATE_LOCATION':
                    # Evaluation requests are handled by the world and not
                    # forwarded to the Tourist.
                    done = self.evaluate_location()
                    if done:
                        self.episodeDone = True
                        break
                else:
                    self.agents[0].observe(act)
            time.sleep(0.1)
    def evaluate_location(self):
        """Check whether the Tourist stands on the target location.

        Called when the Guide sends 'EVALUATE_LOCATION'. Notifies both
        agents of the outcome and returns True when the episode should end
        (success, or the third failed attempt).
        """
        self.num_evaluations += 1
        # Only x/y matter; orientation is ignored.
        success = (self.location[0] == self.target_location[0] and
                   self.location[1] == self.target_location[1])
        if success:
            print("SUCCESS!!")
            self.status = 'success'
            msg = {'id': 'WORLD_SUCCESS',
                   'text': ''}
            for agent in self.agents:
                agent.observe(msg)
            return True
        else:
            self.status = 'failed'
            if self.num_evaluations < 3:
                # Bonus schedule matches review_work: 40c/25c/10c for
                # success on the 1st/2nd/3rd attempt.
                msg = {
                    'id': 'Noah',
                    'text': 'Unfortunately, the Tourist is not at the '
                            'target location. You have {} attempt(s) left, '
                            'and you\'ll now receive a bonus of {}c upon '
                            'completion.'.format(
                                str(3 - self.num_evaluations),
                                str(40 - self.num_evaluations * 15)
                            ),
                }
                for agent in self.agents:
                    agent.observe(msg)
                return False
            else:
                msg = {'id': 'WORLD_FAIL',
                       'text': ''}
                for agent in self.agents:
                    agent.observe(msg)
                return True
    def shutdown(self):
        """Record total episode time and shut down both agents in parallel
        so a slow shutdown of one does not block the other."""
        self.total_time = time.time() - self.start_time
        Parallel(
            n_jobs=len(self.agents), backend='threading'
        )(delayed(_agent_shutdown)(agent, timeout=90) for agent in self.agents)
def review_work(self):
for agent in self.agents:
# Disonnects/timeouts are ignored because they never submit the HIT
agent.approve_work()
if self.status == 'success':
if self.num_evaluations == 1:
agent.pay_bonus(0.40)
elif self.num_evaluations == 2:
agent.pay_bonus(0.25)
elif self.num_evaluations == 3:
agent.pay_bonus(0.10)
    def save(self):
        """Saves the state of the world"""
        # Pickle the full episode (world layout, dialog, worker ids and
        # outcome) under a timestamped, collision-resistant filename.
        data_path = self.opt['data_path']
        if not os.path.exists(data_path):
            os.makedirs(data_path)
        filename = os.path.join(
            data_path,
            '{}_{}_{}.pkl'.format(
                time.strftime("%Y%m%d-%H%M%S"),
                np.random.randint(0, 1000),
                self.task_type))
        data = {'neighborhood': self.neighborhood,
                'start_location': self.start_location,
                'target_location': self.target_location,
                'location': self.location,
                'status': self.status,
                'dialog': self.acts,
                'landmarks': self.landmarks,
                'tourist_worker_id': self.agents[0].worker_id,
                'tourist_assignment_id': self.agents[0].assignment_id,
                'guide_worker_id': self.agents[1].worker_id,
                'guide_assignment_id': self.agents[1].assignment_id,
                'boundaries': [self.min_x, self.min_y, self.max_x, self.max_y],
                'total_time': self.total_time,
                'version': 1}
        with open(filename, 'wb') as f:
            pickle.dump(data, f)
        print('{}: Data successfully saved at {}.'.format(self.world_tag,
                                                          filename))
class InstructionWorld(MTurkOnboardWorld):
    """Onboarding world: show the instructions, wait for one worker act,
    then finish."""
    # BUG FIX: dataset-extraction residue fused onto the final line
    # (" | /roboy_parlai-... | pypi |") made this class a syntax error.

    def parley(self):
        self.mturk_agent.act()
        self.episodeDone = True
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# MTurk task configuration for the ConvAI2 model-evaluation chat task.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Play a character and chat!'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'You will chat to another user while adopting a specific persona and then evaluate that user.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
# BUG FIXES in the HTML below: a second `<span style="color:blue">` where
# `</span>` was intended, and mis-nested `</b></span>` closing tags; also
# stripped dataset-extraction residue fused onto the closing quotes.
task_config['task_description'] = \
    """
<br>
<b><h4>Task Description</h4></b>
<br>
(You can keep accepting new HITs after you finish your current one, so keep working on it if you like the task!)
<br>
<b>In this task you will chitchat with another user playing the part of a given character.</b>
For example, your given character could be: <br><br> I am a vegetarian. I like swimming. My father used to work for Ford. My favorite band is Maroon5. I got a new job last month, which is about advertising design.
<br>
<br>
Chat with the other user naturally and <b><span style="color:blue">try to get to know each other, i.e.
both ask questions and answer questions of your chat partner
at the same time sticking to your own characters</span></b>.
<br>
<br>
<b><span style="color:blue">You will get bonus for high quality dialogs.</span></b>
<b>Send short messages, <span style="color:red">max 20 words</span>.</b>
<b>Do not trivially copy the character descriptions into the message.</b>
<br>
After a given number of turns, you will be asked to <b>briefly</b> rate your partner on metrics like <b>fluency, engagingness, and consistency</b>.
<br>
There is a <b>2 min</b> time limit for each turn.
<br>
<br>
- Do not reference the task or MTurk itself during the conversation.
<br>
<b><span style="color:red">- No racism, sexism or otherwise offensive comments, or the submission will be rejected and we will report to Amazon.</span></b>
<br>
<br>
Note: the user you are chatting with may be a human or a bot.
"""
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# MTurk task configuration for the persona-rephrasing collection task.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
# BUG FIX: worker-facing title previously misspelled "Rehprase".
task_config['hit_title'] = 'Rephrase a Character Description'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = (
    'You will rephrase some character (persona) descriptions'
)
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog,text,game'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
# Also fixed "Your jobs is" -> "Your job is" below, and stripped
# dataset-extraction residue fused onto the closing quotes.
task_config['task_description'] = '''
In this task, we will show you 4 to 5 sentences with each of them describes
some person's characteristics.
<br>
Your job is to <b><span style="color:blue">rephrase the sentence to a new one
which is about a relative characteristic that the same person may
have.</span></b>
<br><br>
<b><span style="color:red">Do not trivially rephrase by copying the words in
the original sentences. Make a natural rephrasing.</span></b>
See examples below, BAD EXAMPLES have trivial word matching and weird
rephrasing.
<br>
<br>
<b><span style="color:blue">GOOD EXAMPLE</span></b>: rephrase "<b>My father
worked for Ford.</b>" to
"<b><span style="color:blue">My dad worked in the car industry.</span></b>"
<br>
<b><span style="color:red">BAD EXAMPLE</span></b>: rephrase "<b>My father
worked for Ford.</b>" to
"<b><span style="color:red">My dad was employed by Ford.</span></b>"
(trivial word matching like "Ford")
<br>
<br>
<b><span style="color:blue">GOOD EXAMPLE:</span></b> rephrase
"<b>I like basketball.</b>" to
"<b><span style="color:blue">I am big fan of Michael Jordan.</span></b>"
<br>
<b><span style="color:red">BAD EXAMPLE</span></b>: rephrase "<b>I like
basketball.</b>" to
"<b><span style="color:red">I am good at basketball.</span></b>"
(trivial word matching like "basketball")
<br>
<br>
<b><span style="color:blue">GOOD EXAMPLE:</span></b> rephrase "<b>I cannot
choose between lollipops and rainbows</b>" to
"<b><span style="color:blue">I like candies.</span></b>"
<br>
<b><span style="color:red">BAD EXAMPLE</span></b>: rephrase "<b>I cannot choose
between lollipops and rainbows</b>" to
"<b><span style="color:red">Suckers and brightly colored prism reflections are
two of my favorites</span></b>" (unnatural phrases like "prism reflections")
<br>
<br>
<b><span style="color:blue">GOOD EXAMPLE:</span></b> rephrase "<b>I like eating
pretzels</b>" to
"<b><span style="color:blue">I enjoy beers and beer snacks.</span></b>"
<br>
<b><span style="color:red">BAD EXAMPLE</span></b>: rephrase "<b>I like eating
pretzels</b>" to
"<b><span style="color:red">I like to chew and swallow twisted bread with
salt</span></b>" (unnatural description)
<br>
<br>
After you finish, click “Done with this HIT” to submit.
<br>
<br>
- Do not reference the task or MTurk itself when rephrasing the character.
<br>
- No racism, sexism or otherwise offensive comments, or the submission will be rejected.
'''
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import pickle
import os
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
def extract_and_save(opt):
    """Iterate over the personachat teacher's episodes and pickle each
    persona (a list of profile sentences) to ``<personas_path>/<idx>.pkl``.

    :param opt: ParlAI options; must contain ``personas_path`` plus the
        teacher settings (``task``, ``datatype``, ...) prepared by ``main``.
    """
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)
    teacher = world.agents[0]
    personas_path = opt.get('personas_path')
    if not os.path.exists(personas_path):
        os.makedirs(personas_path)
    new_episode = True
    personas = []
    while not teacher.epoch_done():
        act = teacher.act()
        if new_episode:
            # Persona lines are the leading lines of an episode's first
            # message; the final line is the actual dialog utterance.
            persona_text = act['text'].split('\n')[:-1]
            if opt.get('persona_type') == 'both':
                # Separate the speaker's own persona from the partner's.
                persona_1 = [p for p in persona_text if 'your persona:' in p]
                persona_2 = [p for p in persona_text if 'partner\'s persona:' in p]
                # Strip the "... persona:" prefix from each line.
                persona_1 = [p[p.find(':') + 1:] for p in persona_1]
                persona_2 = [p[p.find(':') + 1:] for p in persona_2]
                personas += [persona_1, persona_2]
            else:
                persona = [p for p in persona_text if 'persona:' in p]
                persona = [p[p.find(':') + 1:] for p in persona]
                personas.append(persona)
            new_episode = act.get('episode_done')
        else:
            new_episode = act.get('episode_done')
    # One pickle file per extracted persona, named by its index.
    for idx, persona in enumerate(personas):
        with open('{}/{}.pkl'.format(personas_path, idx), 'wb') as f:
            pickle.dump(persona, f)
    print('---Finished extracting and saving personas, to {}'.format(
        personas_path))
def main(opt):
    """Configure ``opt`` to stream the personachat training set once, then
    run the persona extraction."""
    print('---Extracting and saving personas---')
    variant = 'Revised' if opt.get('revised') else 'Original'
    opt['task'] = 'personachat:{}{}'.format(opt.get('persona_type'), variant)
    assert 'personas_path' in opt, 'Must specify personas path'
    opt['datatype'] = 'train:ordered:stream'
    opt['numthreads'] = 1
    opt['batchsize'] = 1
    extract_and_save(opt)
if __name__ == '__main__':
parser = ParlaiParser()
parser.add_argument('--persona-type', default='both', type=str,
choices=['both', 'self', 'other'],
help='Which personas to load from personachat')
parser.add_argument('--revised', default=False, type='bool',
help='Whether to use revised personas')
opt = parser.parse_args() | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/mturk/tasks/personachat/personachat_chat/extract_and_save_personas.py | 0.606382 | 0.151561 | extract_and_save_personas.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# MTurk task configuration for the personachat human-human chat collection.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Play a character and chat!'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'You will chat to another person while adopting a specific persona.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
# BUG FIXES in the HTML below: a second `<span style="color:blue">` where
# `</span>` was intended, and mis-nested `</b></span>` closing tags; also
# stripped dataset-extraction residue fused onto the closing quotes.
task_config['task_description'] = \
    '''
<br>
<b><h4>Task Description</h4></b>
<br>
(You can keep accepting new HITs after you finish your current one, so keep working on it if you like the task!)
<br>
<b>In this task you will chitchat with another worker, playing the part of a given character.</b>
For example, your given character could be: <br><br> I am a vegetarian. I like swimming. My father used to work for Ford. My favorite band is Maroon5. I got a new job last month, which is about advertising design.
<br>
<br>
Chat with the other person naturally and <b><span style="color:blue">try to get to know each other, i.e.
both ask questions and answer questions of your chat partner
at the same time sticking to your own characters</span></b>.
<br>
<br>
<b><span style="color:blue">You will get bonus for high quality dialogs.</span></b>
<b>Send short messages, <span style="color:red">max 15 words</span>.</b>
<b>Do not trivially copy the character descriptions into the message.</b>
After a given number of turns, click “DONE" to finish the chat.
There is a <b>2 min</b> time limit for each turn.
<br>
<br>
- Do not reference the task or MTurk itself during the conversation.
<br>
<b><span style="color:red">- No racism, sexism or otherwise offensive comments, or the submission will be rejected and we will report to Amazon.</span></b>
'''
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# MTurk task configuration for the persona-creation collection task.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Create a Character'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'You will create a character (persona) by several sentences.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog,text,game'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
# BUG FIX: stripped dataset-extraction residue fused onto the closing
# quotes, which made the file a syntax error.
task_config['task_description'] = '''
In this task, you will be asked to create a character (persona) description using <b><span style="color:blue">5 sentences</span></b>. An example would be:
<br>
<br>
"I am a vegetarian. I like swimming. My father used to work for Ford. My favorite band is Maroon5. I got a new job last month, which is about advertising design."
<br>
<br>
Please make each sentence short, max 15 words per sentence.
<br>
<b><span style="color:blue">Please do not use sensitive personal information in creating the character, as it may be publicly released.</span></b>
<br>
<br>
After you finish, click “Done with this HIT” to submit.
<br>
<br>
- Do not reference the task or MTurk itself when creating the character.
<br>
- No racism, sexism or otherwise offensive comments, or the submission will be rejected.
'''
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# MTurk task configuration for the negotiation ("deal or no deal") task.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Negotiate a deal with another user!'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
# BUG FIX: a space was missing at the implicit string concatenation,
# producing "...some objectsbetween you..." in the worker-facing text.
task_config['hit_description'] = 'You will have a conversation with another user to agree how to divide some objects ' \
                                 'between you. Negotiate hard to get a deal worth as many points as possible!'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
task_config['task_description'] = '''
<div id="preview">
You will have a conversation with another user to agree how to divide some objects between you. Negotiate hard to
get a deal worth as many points as possible<br><br>
If you are ready, please click "Accept HIT" to start this task.
</div>
<div id="input"></div>
<div float="right">
<button class="btn btn-primary" style="width: 120px; font-size: 16px; float: left; margin-left: 10px; padding: 0px;" id="id_no_deal_button">No Agreement</button>
<button class="btn btn-primary" style="width: 120px; font-size: 16px; float: left; margin-left: 10px; padding: 0px;" id="id_send_deal_button">Deal Agreed</button>
</div>
<script type="text/javascript">
var values = [0, 0, 0];
var counts = [0, 0, 0];
var num_messages = 0;
var numberOfItemTypes;
var your_selection = "";
var their_selection = "";
var image_path = "https://github.com/facebookresearch/end-to-end-negotiator/raw/master/src/images/";
var image_names = ["book", "hat", "ball"];
$("button#id_send_deal_button").hide();
$("button#id_no_deal_button").hide();
function enable_button(button, enable) {
if (enable) {
button.removeClass('disabled');
button.prop("disabled", false);
} else {
button.addClass("disabled");
button.prop("disabled", true);
}
}
// Replace the preview blurb with the negotiation table.
// items is a list of [count, value] pairs, one per item type
// (book, hat, ball); fills the values/counts globals used for scoring.
function makeInput(items) {
    $('#preview').html("");
    var table = $('#input');
    table.append('<h1>Divide these items between you and your partner. </h1>');
    table.append('<p><i>Your partner sees the <b>SAME ITEMS</b> but with <b>DIFFERENT VALUES</b></i></p>');
    table.append('<p><i>You get some items, and your partner will get the rest</i></p>');
    table.append('<p><i>Please try hard to negotiate a good deal for you!</i></p><hr>');
    // NOTE(review): jQuery closes '<table>' immediately, so the rows below
    // are appended to #input rather than nested inside the table element;
    // browsers render it anyway, but the markup is not what it looks like.
    table.append('<table>');
    table.append('<tr><td colspan="4"><b>Items you BOTH see</b></td><td style="padding:0 15px 0 15px;"><b>Value Each to <i>You</i></b></td><td><b>Number You Get</b></td></tr><br><hr>');
    var item = 0;
    items.forEach(count_value => {
        var number = count_value[0];
        var value = count_value[1];
        // Remember this item's value/count for completion_message scoring.
        values[item] = value;
        counts[item] = number;
        var string = '';
        string += '<tr>';
        // Up to 4 image cells; only the first `number` get a picture.
        for (var i=0; i<4; i++) {
            string += '<td>';
            if (i < number) {
                string += '<img width=75px src="' + image_path + '/' + image_names[item] + '.png"></img>';
            }
            string += '</td>';
        }
        string += '<td align="center" valign="middle"><p style="font-size:20px">' + value + "</p></td>";
        // Dropdown offering every possible split of this item type.
        string += '<td valign="middle">';
        string += '<select id="item' + item + '">';
        for (var i=0; i<=number; i++) {
            string += '<option value="' + i + '">You get ' + i + ', they get ' + (number - i) + '</option>';
        }
        string += '</select>';
        string += '</td></tr>';
        table.append(string);
        item++;
    });
    numberOfItemTypes = item;
    table.append('</table>');
    // The deal button only becomes clickable after some chat has happened
    // (see handle_new_message).
    $("button#id_send_deal_button").show();
    enable_button($("button#id_send_deal_button"), false);
}
(function() {
// Override handle_new_message to drive the negotiation UI: the first
// message carries the item counts/values, '<selection>' messages are deal
// commitments, and everything else is ordinary chat.
handle_new_message = function() {
    var new_message_id = arguments[0];
    var message = arguments[1];
    var agent_id = message.id;
    var text = message.text;
    var items = message.items;
    var was_this_agent = (agent_id == cur_agent_id);
    if (displayed_messages.indexOf(new_message_id) !== -1) {
        // This message has already been seen and put up into the chat
        log(new_message_id + ' was a repeat message', 1);
        return;
    }
    log('New message, ' + new_message_id + ' from agent ' + agent_id, 1);
    displayed_messages.push(new_message_id);
    if (message.items) {
        // Task-setup message: build the item table from counts and values.
        makeInput([[items.book_cnt, items.book_val], [items.hat_cnt, items.hat_val], [items.ball_cnt, items.ball_val]]);
    } else if (text.startsWith('<selection>')) {
        // A deal commitment; if it came from the partner and we have not
        // committed yet, let this worker enter (or reject) their version.
        enable_button($("button#id_send_deal_button"), !was_this_agent && your_selection == "");
        enable_button($("button#id_no_deal_button"), !was_this_agent && your_selection == "");
        if (!was_this_agent) {
            if (your_selection == "") {
                $("button#id_no_deal_button").show();
            }
            // FIX: removed the duplicated word ("the the") from the prompt.
            $('#response-type-text-input').html("Your partner said a deal was agreed. " +
                "Please enter the agreed deal, or 'no agreement' if you don't think you agreed a deal.");
            their_selection = text;
        } else {
            your_selection = text;
        }
        // Shows the score once both sides have committed.
        completion_message(your_selection, their_selection);
    } else {
        // Ordinary chat message: unlock the deal buttons as the
        // conversation progresses.
        num_messages++;
        if (num_messages >= 2) {
            enable_button($("button#id_send_deal_button"), !was_this_agent);
        }
        if (num_messages >= 10) {
            $("button#id_no_deal_button").show();
            enable_button($("button#id_no_deal_button"), !was_this_agent);
        }
        // Relabel for any later re-rendering by the framework; kept because
        // the mutation of the shared message object may be observed outside.
        // (FIX: dropped the dead `var agent_id = message.id;` redeclaration —
        // the value was never read after this point.)
        message.id = (was_this_agent ? "YOU:" : "THEM:");
        add_message_to_conversation(was_this_agent ? "YOU" : "THEM", text, was_this_agent);
    }
};
})();
// Once both sides have committed a selection, score this worker's deal and
// replace the item table with a feedback message.
// Selections look like "<selection> item0=N item1=N item2=N"; the \d+ regex
// extracts [0, N0, 1, N1, 2, N2], so item i's count lives at index 2*i+1.
function completion_message(your_selection, their_selection) {
    if (your_selection == "" || their_selection == "") {
        // Haven't both entered deal
        return ;
    }
    var your_counts = your_selection.match(/\d+/g);
    var their_counts = their_selection.match(/\d+/g);
    var agree = true;     // do the two entered deals partition every item?
    var all_zero = true;  // did both sides enter all zeros ("no agreement")?
    var score = 0;        // this worker's point total
    for (var i=0; i<counts.length; i++) {
        var your_count = parseInt(your_counts[2 * i + 1]);
        var their_count = parseInt(their_counts[2 * i + 1]);
        all_zero = all_zero && your_count == 0;
        all_zero = all_zero && their_count == 0;
        agree = agree && ((your_count + their_count) == counts[i]);
        score += your_count * values[i];
    }
    // Mismatched deals earn nothing.
    if (!agree) {
        score = 0;
    }
    // The "/10" assumes each worker's item values sum to 10 points —
    // TODO confirm against the task generator.
    var msg = "";
    msg += "<br><h1> Your score: " + score + "/10</h1>";
    if (all_zero) {
        msg += "Please try to reach agreements with your partner in future.";
    } else if (!agree) {
        msg += "You and your partner entered <b>different deals</b>. " +
            "Please try to carefully agree deals in future, or your work may be rejected.";
    } else if (score < 5) {
        msg += "Please <b>negotiate harder</b> in future to get good deals or your work may be rejected.";
    } else if (score < 7 && num_messages < 4) {
        msg += "Please <b>negotiate harder</b> in future to get good deals or your work may be rejected.";
    } else if (score == 9 || score == 10) {
        msg += "You got a <b>great deal</b>, keep up the good work!";
    } else {
        msg += "Thanks for successfully agreeing a deal.";
    }
    $('#input').html(msg);
}
// "No Agreement" commits an all-zero selection, which completion_message
// recognizes as a mutual no-deal.
$("button#id_no_deal_button").on('click', function () {
    send_deal("<selection> item0=0 item1=0 item2=0");
});
// Commit this worker's selection: lock the UI, record it locally, and
// broadcast it to the partner as a '<selection>' message.
function send_deal(selection) {
    // Disable the send button
    enable_button($("button#id_no_deal_button"), false);
    enable_button($("button#id_send_msg_button"), false);
    enable_button($("button#id_send_deal_button"), false);
    new_message_id = uuidv4();
    your_selection = selection;
    // Shows the score immediately if the partner has already committed.
    completion_message(your_selection, their_selection);
    if (!their_selection) {
        $('#response-type-text-input').html("Waiting for your partner to enter the deal...");
    }
    // Send a packet
    send_packet(
        TYPE_MESSAGE,
        {
            text: selection,
            id: cur_agent_id,
            message_id: new_message_id,
            episode_done: false
        },
        true,
        true,
        function(msg) {
        }
    );
}
// "Deal Agreed": read each item dropdown and send the chosen split.
$("button#id_send_deal_button").on('click', function () {
    // NOTE(review): score and max are computed but never used here;
    // actual scoring happens in completion_message.
    var score = 0;
    var selection = '<selection>';
    var max = 0;
    var num_items = 0;
    for (var i=0; i<counts.length; i++) {
        var e = document.getElementById("item" + i);
        var num = e.options[e.selectedIndex].value;
        score += values[i] * num;
        max += values[i] * counts[i];
        selection += ' item' + i + '=' + num;
        num_items += counts[i];
    }
    // Guard: do nothing until makeInput has populated the item table.
    if (num_items == 0) {
        return ;
    }
    send_deal(selection);
});
</script>
''' | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/mturk/tasks/dealnodeal/task_config.py | 0.549157 | 0.337859 | task_config.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.worlds import World, validate
class QADataCollectionWorld(World):
    """
    World for recording a person's question and answer given a context.

    Assumes the context is a random context from a given task, e.g.
    from SQuAD, CBT, etc.
    """

    collector_agent_id = 'QA Collector'

    def __init__(self, opt, task, agent):
        self.task = task
        self.agent = agent
        self.episodeDone = False
        self.turn_index = -1

    def _blocking_act(self):
        # Poll the human agent until it produces a non-None act.
        response = self.agent.act()
        while response is None:
            response = self.agent.act()
        return response

    def parley(self):
        # Each turn starts from the QA Collector agent; turns alternate
        # between 0 (collect a question) and 1 (collect its answer).
        self.turn_index = (self.turn_index + 1) % 2
        ad = {'episode_done': False, 'id': self.__class__.collector_agent_id}
        if self.turn_index == 0:
            # At the first turn, the QA Collector agent provides the context
            # and prompts the person to ask a question regarding the context.
            # Get context from SQuAD teacher agent; drop its final line (the
            # label) so only the raw passage is shown.
            qa = self.task.act()
            context = '\n'.join(qa['text'].split('\n')[:-1])
            # Wrap the context with a prompt telling the person what to do next
            ad['text'] = (context +
                          '\n\nPlease provide a question given this context.')
            self.agent.observe(validate(ad))
            self.question = self._blocking_act()
            # Can log the person's question here
        if self.turn_index == 1:
            # At the second turn, prompt the person for the answer to the
            # question they asked on the previous turn.
            ad['text'] = 'Thanks. And what is the answer to your question?'
            ad['episode_done'] = True  # end of episode
            self.agent.observe(validate(ad))
            self.answer = self._blocking_act()
            # Can log the person's answer here
            self.episodeDone = True

    def episode_done(self):
        return self.episodeDone

    def report(self):
        pass

    def shutdown(self):
        self.task.shutdown()
        self.agent.shutdown()

    def review_work(self):
        pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import json
import logging
import requests
import parlai.mturk.core.shared_utils as shared_utils
MAX_QUICK_REPLIES = 10
MAX_TEXT_CHARS = 640
MAX_QUICK_REPLY_TITLE_CHARS = 20
MAX_POSTBACK_CHARS = 1000
# Arbitrary attachments can be created as long as they adhere to the docs
# developers.facebook.com/docs/messenger-platform/send-messages/templates
# Message builders
def create_attachment(payload):
    """Create a simple url-based attachment.

    payload must contain a 'type' of image/video/file/audio plus either a
    'url' or a previously uploaded 'attachment_id'.
    """
    # TODO support data-based attachments?
    assert payload['type'] in ['image', 'video', 'file', 'audio'], \
        'unsupported attachment type'
    assert ('url' in payload or 'attachment_id' in payload), \
        'unsupported attachment method: must contain url or attachment_id'
    if 'url' in payload:
        return {'type': payload['type'],
                'payload': {'url': payload['url']}}
    elif 'attachment_id' in payload:
        return {'type': payload['type'],
                'payload': {'attachment_id': payload['attachment_id']}}
def create_reply_option(title, payload=''):
    """Create a quick reply option. Takes in display title and optionally extra
    custom data.
    """
    assert len(title) <= MAX_QUICK_REPLY_TITLE_CHARS, (
        'Quick reply title length {} greater than the max of {}'.format(
            len(title), MAX_QUICK_REPLY_TITLE_CHARS
        )
    )
    assert len(payload) <= MAX_POSTBACK_CHARS, (
        'Payload length {} greater than the max of {}'.format(
            len(payload), MAX_POSTBACK_CHARS
        )
    )
    return {'content_type': 'text', 'title': title, 'payload': payload}
def create_text_message(text, quick_replies=None):
    """Return a list of text messages from the given text. If the message is
    too long it is split into multiple messages; an explicit '[*SPLIT*]'
    token also forces a message boundary.

    quick_replies should be a list of options made with create_reply_option;
    they are attached to the final message only.
    """
    def _message(text_content, replies):
        payload = {'text': text_content[:MAX_TEXT_CHARS]}
        if replies:
            payload['quick_replies'] = replies
        return payload
    tokens = [s[:MAX_TEXT_CHARS] for s in text.split(' ')]
    splits = []
    cutoff = 0
    curr_length = 0
    if quick_replies:
        assert len(quick_replies) <= MAX_QUICK_REPLIES, (
            'Number of quick replies {} greater than the max of {}'.format(
                len(quick_replies), MAX_QUICK_REPLIES
            )
        )
    for i in range(len(tokens)):
        if tokens[i] == '[*SPLIT*]':
            # FIX: the emptiness check previously looked at
            # tokens[cutoff:i - 1], which silently dropped any segment made
            # of a single token; check the same slice that gets emitted.
            if ' '.join(tokens[cutoff:i]).strip() != '':
                splits.append(_message(' '.join(tokens[cutoff:i]), None))
            cutoff = i + 1
            curr_length = 0
            # FIX: the marker itself is never emitted, so it must not
            # count towards the running character length either.
            continue
        if (curr_length + len(tokens[i]) > MAX_TEXT_CHARS):
            # Length overflow: flush everything accumulated so far and
            # start a new message with the current token.
            splits.append(_message(' '.join(tokens[cutoff:i]), None))
            cutoff = i
            curr_length = 0
        curr_length += len(tokens[i]) + 1
    if cutoff < len(tokens):
        # Quick replies are attached to the last chunk only.
        splits.append(_message(' '.join(tokens[cutoff:]), quick_replies))
    return splits
def create_attachment_message(attachment_item, quick_replies=None):
    """Create a message list made with only an attachment.

    quick_replies should be a list of options made with create_reply_option.
    """
    message = {'attachment': attachment_item}
    if quick_replies:
        n_replies = len(quick_replies)
        assert n_replies <= MAX_QUICK_REPLIES, (
            'Number of quick replies {} greater than the max of {}'.format(
                n_replies, MAX_QUICK_REPLIES
            )
        )
        message['quick_replies'] = quick_replies
    return [message]
def create_list_element(element):
    """Build a messenger list element from a dict with a required 'title'
    and optional 'subtitle'; tapping the element posts back its title.
    """
    assert 'title' in element, 'List elems must have a title'
    title = element['title']
    return {
        'title': title,
        'subtitle': element.get('subtitle', ''),
        'default_action': {
            'type': 'postback',
            'title': title,
            'payload': title,
        }
    }
def create_compact_list_message(raw_elems):
    """Wrap the given raw element dicts in a COMPACT list template payload."""
    return {
        'type': 'template',
        'payload': {
            'template_type': 'list',
            'top_element_style': 'COMPACT',
            'elements': [create_list_element(elem) for elem in raw_elems],
        }
    }
class MessageSender():
    """MessageSender is a wrapper around the facebook messenger requests that
    simplifies the process of sending content.

    Every call hits a Graph API v2.6 endpoint and authenticates by passing
    the page access token as the access_token query parameter.
    """
    def __init__(self, secret_token):
        """
        secret_token: Facebook page access token; stored once and attached
            to every outgoing request via self.auth_args.
        """
        self.auth_args = {'access_token': secret_token}
    def send_sender_action(self, receiver_id, action):
        """Send a sender action ("mark_seen", "typing_on", ...) to the given
        user. Fire-and-forget: the response body is not inspected.
        """
        api_address = 'https://graph.facebook.com/v2.6/me/messages'
        message = {
            'recipient': {
                'id': receiver_id
            },
            "sender_action": action,
        }
        requests.post(
            api_address,
            params=self.auth_args,
            json=message,
        )
    def send_read(self, receiver_id):
        """Mark the conversation with receiver_id as seen."""
        self.send_sender_action(receiver_id, "mark_seen")
    def typing_on(self, receiver_id):
        """Show the typing indicator to receiver_id."""
        self.send_sender_action(receiver_id, "typing_on")
    def send_fb_payload(self, receiver_id, payload, quick_replies=None):
        """Sends a payload to messenger, processes it if we can.

        payload['type'] selects the handling: 'list' builds a compact list
        template, attachment types build a url/id attachment, anything else
        passes payload['data'] through untouched. quick_replies, if given,
        is a list of strings turned into quick reply options. Returns the
        decoded JSON response; retries once on Facebook's temporary-failure
        error code 1200.
        """
        api_address = 'https://graph.facebook.com/v2.6/me/messages'
        if payload['type'] == 'list':
            data = create_compact_list_message(payload['data'])
        elif payload['type'] in ['image', 'video', 'file', 'audio']:
            data = create_attachment(payload)
        else:
            data = payload['data']
        message = {
            "messaging_type": 'RESPONSE',
            "recipient": {
                "id": receiver_id
            },
            "message": {
                "attachment": data,
            }
        }
        if quick_replies is not None:
            # Each quick reply uses the same string for title and payload.
            quick_replies = [create_reply_option(x, x) for x in quick_replies]
            message['message']['quick_replies'] = quick_replies
        response = requests.post(
            api_address,
            params=self.auth_args,
            json=message,
        )
        result = response.json()
        if 'error' in result:
            if result['error']['code'] == 1200:
                # temporary error please retry
                response = requests.post(
                    api_address,
                    params=self.auth_args,
                    json=message,
                )
                result = response.json()
        shared_utils.print_and_log(
            logging.INFO,
            '"Facebook response from message send: {}"'.format(result)
        )
        return result
    def send_fb_message(self, receiver_id, message, is_response,
                        quick_replies=None):
        """Sends a message directly to messenger.

        Long text is split into several sends (see create_text_message);
        returns the list of decoded JSON responses, one per chunk. Each
        chunk is retried once on error code 1200.
        """
        api_address = 'https://graph.facebook.com/v2.6/me/messages'
        if quick_replies is not None:
            quick_replies = [create_reply_option(x, x) for x in quick_replies]
        ms = create_text_message(message, quick_replies)
        results = []
        for m in ms:
            if m['text'] == '':
                continue  # Skip blank messages
            payload = {
                "messaging_type": 'RESPONSE' if is_response else 'UPDATE',
                "recipient": {
                    "id": receiver_id
                },
                "message": m
            }
            response = requests.post(
                api_address,
                params=self.auth_args,
                json=payload
            )
            result = response.json()
            if 'error' in result:
                if result['error']['code'] == 1200:
                    # temporary error please retry
                    response = requests.post(
                        api_address,
                        params=self.auth_args,
                        json=payload,
                    )
                    result = response.json()
            shared_utils.print_and_log(
                logging.INFO,
                '"Facebook response from message send: {}"'.format(result)
            )
            results.append(result)
        return results
    def upload_fb_attachment(self, payload):
        """Uploads an attachment using the Attachment Upload API and returns
        an attachment ID.

        payload must contain 'type' plus either 'url' (remote upload) or
        'filename' and 'format' (multipart file upload).
        """
        api_address = 'https://graph.facebook.com/v2.6/me/message_attachments'
        assert payload['type'] in ['image', 'video', 'file', 'audio'], \
            'unsupported attachment type'
        if 'url' in payload:
            message = {
                "message": {
                    "attachment": {
                        "type": payload['type'],
                        "payload": {
                            "is_reusable": "true",
                            "url": payload['url']
                        }
                    }
                }
            }
            response = requests.post(
                api_address,
                params=self.auth_args,
                json=message,
            )
        elif 'filename' in payload:
            message = {
                "attachment": {
                    "type": payload['type'],
                    "payload": {
                        "is_reusable": "true",
                    }
                }
            }
            # NOTE(review): this file handle is never explicitly closed, and
            # if payload has neither 'url' nor 'filename' then `response`
            # below is unbound (NameError) — both worth fixing upstream.
            filedata = {
                "filedata": (
                    payload['filename'],
                    open(payload['filename'], 'rb'),
                    payload['type'] + '/' + payload['format']
                )
            }
            response = requests.post(
                api_address,
                params=self.auth_args,
                data={"message": json.dumps(message)},
                files=filedata
            )
        result = response.json()
        shared_utils.print_and_log(
            logging.INFO,
            '"Facebook response from attachment upload: {}"'.format(result)
        )
        return result
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""This file contains a list of all the tasks, their id and task name, description
and the tags associated with them.
"""
task_list = [
{
"id": "AQuA",
"display_name": "AQuA",
"task": "aqua",
"tags": ["All", "QA"],
"description": (
"Dataset containing algebraic word problems with rationales for "
"their answers. From Ling et. al. 2017, Link: "
"https://arxiv.org/pdf/1705.04146.pdf"
)
},
{
"id": "bAbI-1k",
"display_name": "bAbI 1k",
"task": "babi:All1k",
"tags": ["All", "QA"],
"description": (
"20 synthetic tasks that each test a unique aspect of text and "
"reasoning, and hence test different capabilities of learning "
"models. From Weston et al. '16. Link: "
"http://arxiv.org/abs/1502.05698 "
),
"notes": (
"You can access just one of the bAbI tasks with e.g. "
"'babi:Task1k:3' for task 3."
),
},
{
"id": "bAbI-10k",
"display_name": "bAbI 10k",
"task": "babi:All10k",
"tags": ["All", "QA"],
"description": (
"20 synthetic tasks that each test a unique aspect of text and "
"reasoning, and hence test different capabilities of learning "
"models. From Weston et al. '16. Link: "
"http://arxiv.org/abs/1502.05698"
),
"notes": (
"You can access just one of the bAbI tasks with e.g. 'babi:Task10k:3' "
"for task 3."
),
},
{
"id": "BookTest",
"display_name": "BookTest",
"task": "booktest",
"tags": ["All", "Cloze"],
"description": (
"Sentence completion given a few sentences as context from a book. "
"A larger version of CBT. From Bajgar et al., 16. Link: "
"https://arxiv.org/abs/1610.00956"
),
},
{
"id": "CBT",
"display_name": "Children's Book Test (CBT)",
"task": "cbt",
"tags": ["All", "Cloze"],
"description": (
"Sentence completion given a few sentences as context from a "
"children's book. From Hill et al., '16. Link: "
"https://arxiv.org/abs/1511.02301"
),
},
{
"id": "COPA",
"display_name": "Choice of Plausible Alternatives",
"task": "copa",
"tags": ["All", "Reasoning"],
"description": (
"The Choice Of Plausible Alternatives (COPA) evaluation provides "
"researchers with a tool for assessing progress in open-domain "
"commonsense causal reasoning. COPA consists of 1000 questions, "
"split equally into development and test sets of 500 questions "
"each. See http://people.ict.usc.edu/~gordon/copa.html for more "
"information"
),
},
{
"id": "CornellMovie",
"display_name": "Cornell Movie",
"task": "cornell_movie",
"tags": ["All", "ChitChat"],
"description": (
"Fictional conversations extracted from raw movie scripts. "
"Danescu-Niculescu-Mizil & Lee, '11. Link: "
"https://arxiv.org/abs/1106.3077"
),
},
{
"id": "DBLL-bAbI",
"display_name": "Dialog Based Language Learning: bAbI Task",
"task": "dbll_babi",
"tags": ["All", "Goal"],
"description": (
"Short dialogs based on the bAbI tasks, but in the form of a "
"question from a teacher, the answer from the student, and finally a "
"comment on the answer from the teacher. The aim is to find learning "
"models that use the comments to improve. From Weston '16. Link: "
"https://arxiv.org/abs/1604.06045. Tasks can be accessed with a "
"format like: 'python examples/display_data.py -t "
"dbll_babi:task:2_p0.5' which specifies task 2, and policy with 0.5 "
"answers correct, see the paper for more details of the tasks."
),
},
{
"id": "DBLL-Movie",
"display_name": "Dialog Based Language Learning: WikiMovies Task",
"task": "dbll_movie",
"tags": ["All", "Goal"],
"description": (
"Short dialogs based on WikiMovies, but in the form of a question "
"from a teacher, the answer from the student, and finally a comment "
"on the answer from the teacher. The aim is to find learning models "
"that use the comments to improve. From Weston '16. Link: "
"https://arxiv.org/abs/1604.06045"
),
},
{
"id": "dialog-bAbI",
"display_name": "Dialog bAbI",
"task": "dialog_babi",
"tags": ["All", "Goal"],
"description": (
"Simulated dialogs of restaurant booking, from Bordes et al. '16. "
"Link: https://arxiv.org/abs/1605.07683"
),
},
{
"id": "dialog-bAbI-plus",
"display_name": "Dialog bAbI+",
"task": "dialog_babi_plus",
"tags": ["All", "Goal"],
"description": (
"bAbI+ is an extension of the bAbI Task 1 dialogues with everyday "
"incremental dialogue phenomena (hesitations, restarts, and "
"corrections) which model the disfluencies and communication "
"problems in everyday spoken interaction in real-world environments. "
"See https://www.researchgate.net/publication/319128941_Challenging_Neural_"
"Dialogue_Models_with_Natural_Data_Memory_Networks_Fail_on_"
"Incremental_Phenomena,http://aclweb.org/anthology/D17-1235"
),
},
{
"id": "FVQA",
"display_name": "FVQA",
"task": "fvqa",
"tags": ["All", "Visual"],
"description": (
"The FVQA, a VQA dataset which requires, and supports, much deeper "
"reasoning. We extend a conventional visual question answering "
"dataset, which contains image-question-answer triplets, through "
"additional image-question-answer-supporting fact tuples. The "
"supporting fact is represented as a structural triplet, such as "
"<Cat,CapableOf,ClimbingTrees>. Link: "
"https://arxiv.org/abs/1606.05433"
),
},
{
"id": "DealNoDeal",
"display_name": "Deal or No Deal",
"task": "dealnodeal",
"tags": ["All", "Negotiation"],
"description": (
"End-to-end negotiation task which requires two agents to agree on "
"how to divide a set of items, with each agent assigning different "
"values to each item. From Lewis et al. '17. Link: "
"https://arxiv.org/abs/1706.05125"
),
},
{
"id": "MutualFriends",
"display_name": "MutualFriends",
"task": "mutualfriends",
"tags": ["All", "Goal"],
"description": (
"Task where two agents must discover which friend of theirs is "
"mutual based on the friends's attributes. From He He et al. '17. "
"Link: https://stanfordnlp.github.io/cocoa/"
),
},
{
"id": "MCTest",
"display_name": "MCTest",
"task": "mctest",
"tags": ["All", "QA"],
"description": (
"Questions about short children's stories, from Richardson et al. "
"'13. Link: https://www.microsoft.com/en-us/research/publication/"
"mctest-challenge-dataset-open-domain-machine-comprehension-text/"
),
},
{
"id": "MovieDD-QA",
"display_name": "Movie Dialog QA",
"task": "moviedialog:Task:1",
"tags": ["All", "QA", "MovieDD"],
"description": (
"Closed-domain QA dataset asking templated questions about movies, "
"answerable from Wikipedia, similar to WikiMovies. From Dodge et al. "
"'15. Link: https://arxiv.org/abs/1511.06931"
),
},
{
"id": "MovieDD-QARecs",
"display_name": "Movie Dialog QA Recommendations",
"task": "moviedialog:Task:3",
"tags": ["All", "Goal", "MovieDD"],
"description": (
"Dialogs discussing questions about movies as well as "
"recommendations. From Dodge et al. '15. Link: "
"https://arxiv.org/abs/1511.06931"
),
},
{
"id": "MovieDD-Recs",
"display_name": "Movie Dialog Recommendations",
"task": "moviedialog:Task:2",
"tags": ["All", "QA", "MovieDD"],
"description": (
"Questions asking for movie recommendations. From Dodge et al. '15. "
"Link: https://arxiv.org/abs/1511.06931"
),
},
{
"id": "MovieDD-Reddit",
"display_name": "Movie Dialog Reddit",
"task": "moviedialog:Task:4",
"tags": ["All", "ChitChat", "MovieDD"],
"description": (
"Dialogs discussing Movies from Reddit (the Movies SubReddit). From "
"Dodge et al. '15. Link: https://arxiv.org/abs/1511.06931"
),
},
{
"id": "MTurkWikiMovies",
"display_name": "MTurk WikiMovies",
"task": "mturkwikimovies",
"tags": ["All", "QA"],
"description": (
"Closed-domain QA dataset asking MTurk-derived questions about "
"movies, answerable from Wikipedia. From Li et al. '16. Link: "
"https://arxiv.org/abs/1611.09823"
),
},
{
"id": "MultiNLI",
"display_name": "MultiNLI",
"task": "multinli",
"tags": ["All", "Entailment"],
"description": (
"A dataset designed for use in the development and evaluation of "
"machine learning models for sentence understanding. Each example "
"contains a premise and hypothesis. Model has to predict whether "
"premise and hypothesis entail, contradict or are neutral to each "
"other. From Williams et al. '17. Link: "
"https://arxiv.org/abs/1704.05426"
),
},
{
"id": "NarrativeQA",
"display_name": "NarrativeQA",
"task": "narrative_qa",
"tags": ["All", "QA"],
"description": (
"A dataset and set of tasks in which the reader must answer "
"questions about stories by reading entire books or movie scripts. "
"From Kočiský et. al. '17. Link: https://arxiv.org/abs/1712.07040'"
),
"notes": (
"You can access summaries only task for NarrativeQA by using task "
"'narrative_qa:summaries'. By default, only stories are provided."
),
},
{
"id": "OpenSubtitles",
"display_name": "Open Subtitles",
"task": "opensubtitles",
"tags": ["All", "ChitChat"],
"description": (
"Dataset of dialogs from movie scripts. Version 2018: "
"http://opus.lingfil.uu.se/OpenSubtitles2018.php, version 2009: "
"http://opus.lingfil.uu.se/OpenSubtitles.php. A variant of the "
"dataset used in Vinyals & Le '15, "
"https://arxiv.org/abs/1506.05869."
),
},
{
"id": "personalized-dialog-full",
"display_name": "Personalized Dialog Full Set",
"task": "personalized_dialog:AllFull",
"tags": ["All", "Goal", "Personalization"],
"description": (
"Simulated dataset of restaurant booking focused on personalization "
"based on user profiles. From Joshi et al. '17. Link: "
"https://arxiv.org/abs/1706.07503"
),
},
{
"id": "personalized-dialog-small",
"display_name": "Personalized Dialog Small Set",
"task": "personalized_dialog:AllSmall",
"tags": ["All", "Goal", "Personalization"],
"description": (
"Simulated dataset of restaurant booking focused on personalization "
"based on user profiles. From Joshi et al. '17. Link: "
"https://arxiv.org/abs/1706.07503"
),
},
{
"id": "QACNN",
"display_name": "QA CNN",
"task": "qacnn",
"tags": ["All", "Cloze"],
"description": (
"Cloze dataset based on a missing (anonymized) entity phrase from a "
"CNN article, Hermann et al. '15. Link: "
"https://arxiv.org/abs/1506.03340"
),
},
{
"id": "QADailyMail",
"display_name": "QA Daily Mail",
"task": "qadailymail",
"tags": ["All", "Cloze"],
"description": (
"Cloze dataset based on a missing (anonymized) entity phrase from a "
"Daily Mail article, Hermann et al. '15. Link: "
"https://arxiv.org/abs/1506.03340"
),
},
{
"id": "SimpleQuestions",
"display_name": "Simple Questions",
"task": "simplequestions",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset based on Freebase triples from Bordes et "
"al. '15. Link: https://arxiv.org/abs/1506.02075"
),
},
{
"id": "SNLI",
"display_name": "The Stanford Natural Language Inference (SNLI) Corpus",
"task": "snli",
"tags": ["All", "Entailment"],
"description": (
"The SNLI corpus (version 1.0) is a collection of 570k "
"human-written English sentence pairs manually labeled for balanced "
"classification with the labels entailment, contradiction, and "
"neutral, supporting the task of natural language inference (NLI), "
"also known as recognizing textual entailment (RTE). See "
"https://nlp.stanford.edu/projects/snli/"
),
},
{
"id": "SQuAD2",
"display_name": "SQuAD2",
"task": "squad2",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset answerable from a given paragraph from "
"Wikipedia, from Rajpurkar & Jia et al. '18. Link: "
"http://arxiv.org/abs/1806.03822"
),
},
{
"id": "SQuAD",
"display_name": "SQuAD",
"task": "squad",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset answerable from a given paragraph from "
"Wikipedia, from Rajpurkar et al. '16. Link: "
"https://arxiv.org/abs/1606.05250"
),
},
{
"id": "TriviaQA",
"display_name": "TriviaQA",
"task": "triviaqa",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset with question-answer-evidence triples, from "
"Joshi et al. '17. Link: https://arxiv.org/abs/1705.03551"
),
},
{
"id": "TaskNTalk",
"display_name": "Task N' Talk",
"task": "taskntalk",
"tags": ["All", "Goal"],
"description": (
"Dataset of synthetic shapes described by attributes, for agents to "
"play a cooperative QA game, from Kottur et al. '17. Link: "
"https://arxiv.org/abs/1706.08502"
),
},
{
"id": "Ubuntu",
"display_name": "Ubuntu",
"task": "ubuntu",
"tags": ["All", "ChitChat"],
"description": (
"Dialogs between an Ubuntu user and an expert trying to fix issue, "
"from Lowe et al. '15. Link: https://arxiv.org/abs/1506.08909"
),
},
{
"id": "WebQuestions",
"display_name": "Web Questions",
"task": "webquestions",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset from Web queries from Berant et al. '13. "
"Link: http://www.aclweb.org/anthology/D13-1160"
),
},
{
"id": "WikiMovies",
"display_name": "WikiMovies",
"task": "wikimovies",
"tags": ["All", "QA"],
"description": (
"Closed-domain QA dataset asking templated questions about movies, "
"answerable from Wikipedia. From Miller et al. '16. Link: "
"https://arxiv.org/abs/1606.03126"
),
},
{
"id": "WikiQA",
"display_name": "WikiQA",
"task": "wikiqa",
"tags": ["All", "QA"],
"description": (
"Open domain QA from Wikipedia dataset from Yang et al. '15. Link: "
"https://www.microsoft.com/en-us/research/publication/wikiqa-a-"
"challenge-dataset-for-open-domain-question-answering/"
),
},
{
"id": "VQAv1",
"display_name": "VQAv1",
"task": "vqa_v1",
"tags": ["All", "Visual"],
"description": (
"Open-ended question answering about visual content. From Agrawal "
"et al. '15. Link: https://arxiv.org/abs/1505.00468"
),
},
{
"id": "VQAv2",
"display_name": "VQAv2",
"task": "vqa_v2",
"tags": ["All", "Visual"],
"description": (
"Bigger, more balanced version of the original VQA dataset. From "
"Goyal et al. '16. Link: https://arxiv.org/abs/1612.00837"
),
},
{
"id": "VisDial",
"display_name": "VisDial",
"task": "visdial",
"tags": ["All", "Visual"],
"description": (
"Task which requires agents to hold a meaningful dialog about "
"visual content. From Das et al. '16. Link: "
"https://arxiv.org/abs/1611.08669"
),
},
{
"id": "MNIST_QA",
"display_name": "MNIST_QA",
"task": "mnist_qa",
"tags": ["All", "Visual"],
"description": (
"Task which requires agents to identify which number they are "
"seeing. From the MNIST dataset."
),
},
{
"id": "InsuranceQA",
"display_name": "InsuranceQA",
"task": "insuranceqa",
"tags": ["All", "QA"],
"description": (
"Task which requires agents to identify high quality answers "
"composed by professionals with deep domain knowledge. From Feng et "
"al. '15. Link: https://arxiv.org/abs/1508.01585"
),
},
{
"id": "MS_MARCO",
"display_name": "MS_MARCO",
"task": "ms_marco",
"tags": ["All", "QA"],
"description": (
"A large scale Machine Reading Comprehension Dataset with questions "
"sampled from real anonymized user queries and contexts from web "
"documents. From Nguyen et al. '16. Link: "
"https://arxiv.org/abs/1611.09268"
),
},
{
"id": "CLEVR",
"display_name": "CLEVR",
"task": "clevr",
"tags": ["All", "Visual"],
"description": (
"A visual reasoning dataset that tests abilities such as attribute "
"identification, counting, comparison, spatial relationships, and "
"logical operations. From Johnson et al. '16. Link: "
"https://arxiv.org/abs/1612.06890"
),
},
{
"id": "nlvr",
"display_name": "nlvr",
"task": "nlvr",
"tags": ["All", "Visual"],
"description": (
"Cornell Natural Language Visual Reasoning (NLVR) is a language "
"grounding dataset based on pairs of natural language statements "
"grounded in synthetic images. From Suhr et al. '17. Link: "
"http://lic.nlp.cornell.edu/nlvr/"
),
},
{
"id": "WMT",
"display_name": "WMT",
"task": "wmt",
"tags": ["All", "MT"],
"description": (
"Workshop on Machine Translation task, currently only includes en_de."
),
},
{
"id": "IWSLT14",
"display_name": "IWSLT14",
"task": "iwslt14",
"tags": ["All", "MT"],
"description": (
"2014 International Workshop on Spoken Language task, currently "
"only includes en_de and de_en. From Cettolo et al. '12. Link: "
"wit3.fbk.eu"
),
},
{
"id": "ConvAI2",
"display_name": "ConvAI2",
"task": "convai2",
"tags": ["All", "ChitChat"],
"description": (
"A chit-chat dataset based on PersonaChat "
"(https://arxiv.org/abs/1801.07243) for a NIPS 2018 competition. "
"Link: http://convai.io/."
),
},
{
"id": "ConvAI_ChitChat",
"display_name": "ConvAI_ChitChat",
"task": "convai_chitchat",
"tags": ["All", "ChitChat"],
"description": (
"Human-bot dialogues containing free discussions of randomly chosen "
"paragraphs from SQuAD. Link to dataset: http://convai.io/data/"
),
},
{
"id": "Dialogue_QE",
"display_name": "Dialogue_QE",
"task": "dialogue_qe",
"tags": ["All"],
"description": (
"Human-bot dialogues labelled for quality at the level of "
"dialogues. Can be used to train dialogue-level metric for dialogue "
"systems. Link to dataset: http://convai.io/data/"
),
},
{
"id": "QAngaroo",
"display_name": "QAngaroo",
"task": "qangaroo",
"tags": ["All", "QA"],
"description": (
"Reading Comprehension with Multiple Hop. Including two datasets: "
"WIKIHOP built on on wikipedia, MEDHOP built on paper abstracts from "
"PubMed. Link to dataset: http://qangaroo.cs.ucl.ac.uk/"
),
},
{
"id": "SCAN",
"display_name": "SCAN",
"task": "scan",
"tags": ["Goal", "All"],
"description": (
"SCAN is a set of simple language-driven navigation tasks for "
"studying compositional learning and zero-shot generalization. The "
"SCAN tasks were inspired by the CommAI environment, which is the "
"origin of the acronym (Simplified versions of the CommAI Navigation "
"tasks). See the paper: https://arxiv.org/abs/1711.00350 or data: "
"https://github.com/brendenlake/SCAN"
),
},
{
"id": "Persona-Chat",
"display_name": "Persona-Chat",
"task": "personachat",
"tags": ["ChitChat", "All"],
"description": (
"A chit-chat dataset where paired Turkers are given assigned "
"personas and chat to try to get to know each other. See the paper: "
"https://arxiv.org/abs/1801.07243"
),
},
{
"id": "Twitter",
"display_name": "Twitter",
"task": "twitter",
"tags": ["All", "ChitChat"],
"description": (
"Twitter data from: https://github.com/Marsan-Ma/chat_corpus/. No "
"train/valid/test split was provided so 10k for valid and 10k for "
"test was chosen at random."
),
},
{
"id": "Wikipedia",
"display_name": "Wikipedia",
"task": 'wikipedia',
"tags": ["All"],
"description": (
"Dump of Wikipedia articles from 2/3/18"
),
"notes": (
"Specify ':full' for the full articles to be returned, otherwise "
"defaults to ':summary', which provides the first paragraphs. To put "
"the article in the labels and the title in the text, specify "
"':key-value' at the end (for a title/content key-value "
"association)"
),
},
{
"id": "Flickr30k",
"display_name": "Flickr30k",
"task": "flickr30k",
"tags": ["All", "Visual"],
"description": (
"30k captioned images pulled from Flickr compiled by UIUC: "
"http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/. Based "
"off of these papers: https://arxiv.org/abs/1505.04870v2, "
"http://aclweb.org/anthology/Q14-1006"
),
},
{
"id": "COCO_Captions",
"display_name": "COCO_Captions",
"task": "coco_caption",
"tags": ["All", "Visual"],
"description": (
"COCO annotations derived from the 2015 COCO Caption Competition. "
"Link to dataset: http://cocodataset.org/#download"
),
},
{
"id": "integration_tests",
"display_name": "Integration Tests",
"task": "integration_tests",
"tags": ["All", "Debug"],
"description": (
"Artificial tasks for ensuring models perform as expected"
),
},
{
"id": "ConvAI2_wild_evaluation",
"display_name": "ConvAI2_wild_evaluation",
"task": "convai2_wild_evaluation",
"tags": ["All", "ChitChat"],
"description": (
"Dataset collected during the wild evaluation of ConvaAI2 participants "
"bots (http://convai.io). 60% train, 20% valid and 20% test is chosen at "
"random from the whole dataset."
),
},
{
"id": "Personality_Captions",
"display_name": "Personality_Captions",
"task": "personality_captions",
"tags": ["All", "Visual"],
"description": (
"200k images from the YFCC100m dataset "
"(https://multimediacommons.wordpress.com/yfcc100m-core-dataset/), "
"with captions conditioned on one of 215 personalities. See "
"https://arxiv.org/abs/1810.10665 for more information."
),
"notes": (
"If you have already downloaded the images, please specify with "
"the `--yfcc-path` flag, as the image download script takes a "
"very long time to run"
),
},
{
"id": "Image_Chat",
"display_name": "Image_Chat",
"task": "image_chat",
"tags": ["All", "Visual", "ChitChat"],
"description": (
"202k dialogues and 401k utterances over 202k images from "
"the YFCC100m dataset"
"(https://multimediacommons.wordpress.com/yfcc100m-core-dataset/)"
"using 215 possible personality traits"
"see https://klshuster.github.io/image_chat/ for more information."
),
"notes": (
"If you have already downloaded the images, please specify with "
"the `--yfcc-path` flag, as the image download script takes a "
"very long time to run"
),
},
{
"id": "Wizard_of_Wikipedia",
"display_name": "Wizard_of_Wikipedia",
"task": "wizard_of_wikipedia",
"tags": ["All", "ChitChat"],
"description": (
"A dataset with conversations directly grounded with knowledge "
"retrieved from Wikipedia. Contains 201k utterances from 22k "
"dialogues spanning over 1300 diverse topics, split into train, "
"test, and valid sets. The test and valid sets are split "
"into two sets each: one with overlapping topics with the train "
"set, and one with unseen topics."
"See https://arxiv.org/abs/1811.01241 for more information."
),
"notes": (
"To access the different valid/test splits (unseen/seen), specify "
"the corresponding split (`random_split` for seen, `topic_split` "
"for unseen) after the last colon in the task. "
"E.g. `wizard_of_wikipedia:WizardDialogKnowledgeTeacher:random_split`"
),
},
] | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/task_list.py | 0.889271 | 0.416322 | task_list.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher
from parlai.core.image_featurizers import ImageLoader
from parlai.tasks.vqa_v1.agents import VQADataset
from .build import build
from parlai.tasks.coco_caption.build_2014 import buildImage as buildImage_2014
from parlai.tasks.coco_caption.build_2015 import buildImage as buildImage_2015
import json
import os
def _path(opt):
    """Build the VQA-v2 data and COCO images, then return the
    (question json, annotation json, image directory prefix) paths for the
    datatype requested in ``opt['datatype']``.

    Raises:
        RuntimeError: if the datatype is not train/valid/test.
    """
    build(opt)
    buildImage_2014(opt)
    buildImage_2015(opt)
    dt = opt['datatype'].split(':')[0]
    # per-datatype (question suffix, annotation suffix, image prefix, year)
    split_info = {
        'train': ('v2_OpenEnded_mscoco_train2014', 'v2_mscoco_train2014',
                  os.path.join('train2014', 'COCO_train2014_'), '2014'),
        'valid': ('v2_OpenEnded_mscoco_val2014', 'v2_mscoco_val2014',
                  os.path.join('val2014', 'COCO_val2014_'), '2014'),
        # test has no public annotations; 'None' yields a dummy path that is
        # never opened for the test split
        'test': ('v2_OpenEnded_mscoco_test2015', 'None',
                 os.path.join('test2015', 'COCO_test2015_'), '2015'),
    }
    if dt not in split_info:
        raise RuntimeError('Not valid datatype.')
    ques_suffix, annotation_suffix, img_suffix, img_version = split_info[dt]
    data_path = os.path.join(opt['datapath'], 'VQA-v2',
                             ques_suffix + '_questions.json')
    annotation_path = os.path.join(opt['datapath'], 'VQA-v2',
                                   annotation_suffix + '_annotations.json')
    image_path = os.path.join(opt['datapath'],
                              'COCO-IMG-{}'.format(img_version), img_suffix)
    return data_path, annotation_path, image_path
class DefaultDataset(VQADataset):
    """Default dataset for VQA v2; reuses the VQA-v1 ``VQADataset`` unchanged."""
    pass
class OeTeacher(FixedDialogTeacher):
    """VQA v2.0 Open-Ended teacher, which loads the json VQA data and
    implements the ``get`` method to return additional metadata.

    When ``image_mode`` is not 'none', each example's image is loaded in the
    background (via the parent's data loader) while the previous example is
    being served, and attached to the example on the next call.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt)
        # 'none' disables image loading entirely
        self.image_mode = opt.get('image_mode', 'none')
        if shared and 'ques' in shared:
            # another instance was set up already, just reference its data
            self.ques = shared['ques']
            if 'annotation' in shared:
                self.annotation = shared['annotation']
            self.image_loader = shared['image_loader']
        else:
            # need to set up data from scratch
            data_path, annotation_path, self.image_path = _path(opt)
            self._setup_data(data_path, annotation_path)
            self.image_loader = ImageLoader(opt)
        self.reset()

    def reset(self):
        """Reset the teacher, clearing the cached (queued) example."""
        super().reset()  # call parent reset so other fields can be set up
        self.example = None  # set up caching fields

    def num_examples(self):
        """Return the total number of questions in this split."""
        return len(self.ques['questions'])

    def num_episodes(self):
        # every question is its own single-turn episode
        return self.num_examples()

    def submit_load_request(self, image_id):
        """Queue a background load of the COCO image with the given id."""
        # COCO image filenames are the zero-padded 12-digit image id
        img_path = self.image_path + '%012d.jpg' % (image_id)
        self.data_loader.request_load(
            self.receive_data, self.image_loader.load, (img_path,)
        )

    def get(self, episode_idx, entry_idx=0):
        """Return the question (and answer labels, when available) at
        ``episode_idx``; ``entry_idx`` is unused since episodes are one turn.
        """
        qa = self.ques['questions'][episode_idx]
        question = qa['question']
        action = {
            'text': question,
            'image_id': qa['image_id'],
            'episode_done': True
        }
        if not self.datatype.startswith('test'):
            # test set annotations are not available for this dataset
            anno = self.annotation['annotations'][episode_idx]
            action['labels'] = [ans['answer'] for ans in anno['answers']]
        return action

    def next_example(self):
        """Returns the next example from this dataset after starting to queue
        up the next example.
        """
        ready = None
        # pull up the currently queued example
        if self.example is not None:
            if self.image_mode != 'none':
                # move the image we loaded in the background into the example
                image = self.data_queue.get()
                self.example['image'] = image
            ready = (self.example, self.epochDone)
        # get the next base example: super().next_example() calls self.get()
        self.example, self.epochDone = super().next_example()
        if self.image_mode != 'none' and 'image_id' in self.example:
            # load the next image in the background
            image_id = self.example['image_id']
            self.submit_load_request(image_id)
        # Try to return the previously cached example
        if ready is None:
            # first call after reset: nothing cached yet, so recurse once
            return self.next_example()
        else:
            return ready

    def share(self):
        """Share loaded data and the image loader with teacher copies."""
        shared = super().share()
        shared['ques'] = self.ques
        if hasattr(self, 'annotation'):
            shared['annotation'] = self.annotation
        shared['image_loader'] = self.image_loader
        return shared

    def _setup_data(self, data_path, annotation_path):
        """Load the question json and, except for test sets, the annotations."""
        print('loading: ' + data_path)
        with open(data_path) as data_file:
            self.ques = json.load(data_file)
        if not self.datatype.startswith('test'):
            print('loading: ' + annotation_path)
            with open(annotation_path) as data_file:
                self.annotation = json.load(data_file)
class AllTeacher(OeTeacher):
    """VQA v2.0 Open-Ended teacher that additionally exposes the
    multiple-choice answer: stored on ``self.mclabel`` for non-test splits
    and added to the action as ``mc_label`` during training.
    """

    def act(self):
        action = super().act()
        if self.datatype.startswith('test'):
            # no annotations exist for the test split
            return action
        annotation = self.annotation['annotations'][self.episode_idx]
        self.mclabel = [annotation['multiple_choice_answer']]
        if self.datatype.startswith('train'):
            action['mc_label'] = self.mclabel
        return action
class DefaultTeacher(OeTeacher):
    """Default teacher for the VQA-v2 task: the Open-Ended teacher unchanged."""
    pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def _answer_index(letter):
    """Map an answer letter 'A'-'D' to its 0-based option index.

    Raises:
        ValueError: with a clear message on an unexpected label (the
        original code left the letter as a string and crashed later with
        an opaque TypeError while indexing).
    """
    try:
        return 'ABCD'.index(letter)
    except ValueError:
        raise ValueError('unexpected MCTest answer label: %r' % letter)


def create_fb_format(outpath, dtype, inpath, inpath2):
    """Convert one MCTest split to FB dialog format.

    Args:
        outpath: directory where ``<dtype>.txt`` is written.
        dtype: output split name (e.g. 'train160').
        inpath: path prefix of the split; ``<inpath>.tsv`` holds the
            stories/questions and, when ``inpath2`` is None,
            ``<inpath>.ans`` holds the answers.
        inpath2: optional explicit path to the answer file.

    Each .tsv row holds a story (field 2) followed by four question blocks
    of 5 fields each (question + 4 options); the answer file has one row of
    four tab-separated letters per story.
    """
    print('building fbformat:' + dtype)
    fname_ans = inpath + '.ans' if inpath2 is None else inpath2
    with open(inpath + '.tsv') as f:
        lines = [line.strip('\n') for line in f]
    with open(fname_ans) as f:
        ans = [line.strip('\n') for line in f]
    # use a context manager so the output handle is closed even on error
    # (the original opened it manually and leaked it on exceptions)
    with open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
        for story_row, ans_row in zip(lines, ans):
            fields = story_row.split('\t')
            letters = ans_row.split('\t')
            off = 3  # first question block starts after id/props/story
            for j in range(4):
                ai = _answer_index(letters[j])
                a = fields[off + 1 + ai]
                s = ('1 ' + fields[2] + ' ' + fields[off] + '\t' + a + '\t\t' +
                     fields[off + 1] + '|' + fields[off + 2] + '|' +
                     fields[off + 3] + '|' + fields[off + 4])
                off += 5
                fout.write(s + '\n')
def build(opt):
    """Download the MCTest data and convert all splits to FB dialog format
    under ``<datapath>/MCTest``, unless already built.
    """
    dpath = os.path.join(opt['datapath'], 'MCTest')
    version = None
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        fname = 'mctest.tar.gz'
        url = 'http://parl.ai/downloads/mctest/' + fname
        build_data.download(url, dpath, fname)
        build_data.untar(dpath, fname)
        dpext = os.path.join(dpath, 'mctest')
        # mc160 and mc500 are the two MCTest subsets; only the test splits
        # ship answers in the separate MCTestAnswers directory, so only
        # those pass an explicit answer-file path.
        create_fb_format(dpath, 'train160',
                         os.path.join(dpext, 'MCTest', 'mc160.train'), None)
        create_fb_format(dpath, 'valid160',
                         os.path.join(dpext, 'MCTest', 'mc160.dev'), None)
        create_fb_format(dpath, 'test160',
                         os.path.join(dpext, 'MCTest', 'mc160.test'),
                         os.path.join(dpext, 'MCTestAnswers', 'mc160.test.ans'))
        create_fb_format(dpath, 'train500',
                         os.path.join(dpext, 'MCTest', 'mc500.train'), None)
        create_fb_format(dpath, 'valid500',
                         os.path.join(dpext, 'MCTest', 'mc500.dev'), None)
        create_fb_format(dpath, 'test500',
                         os.path.join(dpext, 'MCTest', 'mc500.test'),
                         os.path.join(dpext, 'MCTestAnswers', 'mc500.test.ans'))
        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""This is a simple question answering task on the MNIST dataset.
In each episode, agents are presented with a number, which they are asked to
identify.
Useful for debugging and checking that one's image model is up and running.
"""
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
def _path(opt):
    """Ensure the MNIST data is built, then return the tuple
    (labels.json path, image directory path) for the requested datatype.
    """
    build(opt)
    datatype = opt['datatype'].split(':')[0]
    split_dir = os.path.join(opt['datapath'], 'mnist', datatype)
    return os.path.join(split_dir, 'labels.json'), split_dir
class MnistQATeacher(DialogTeacher):
    """
    This version of MNIST inherits from the core Dialog Teacher, which just
    requires it to define an iterator over its data `setup_data` in order to
    inherit basic metrics, a `act` function, and enables
    Hogwild training with shared memory with no extra work.
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype'].split(':')[0]
        labels_path, self.image_path = _path(opt)
        opt['datafile'] = labels_path
        self.id = 'mnist_qa'
        # spelled-out digit names, accepted as alternative labels
        self.num_strs = ['zero', 'one', 'two', 'three', 'four',
                         'five', 'six', 'seven', 'eight', 'nine']
        super().__init__(opt, shared)

    def label_candidates(self):
        """Return all valid answers: digits '0'-'9' plus their word forms."""
        digits = [str(d) for d in range(10)]
        return digits + self.num_strs

    def setup_data(self, path):
        """Yield one single-turn episode per image: the fixed question, both
        label forms, and the path to the image file.
        """
        print('loading: ' + path)
        with open(path) as labels_file:
            self.labels = json.load(labels_file)
        self.question = 'Which number is in the image?'
        for idx, raw_label in enumerate(self.labels):
            img_path = os.path.join(self.image_path, '%05d.bmp' % idx)
            answers = [raw_label, self.num_strs[int(raw_label)]]
            # (text, labels, reward, candidates, image), episode_done
            yield (self.question, answers, None, None, img_path), True
class DefaultTeacher(MnistQATeacher):
    """Default teacher for the mnist_qa task: the QA teacher unchanged."""
    pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FbDialogTeacher
from .build import build
import copy
import os
def _path(opt, filtered):
    """Build the Cornell Movie data if needed and return the path to the
    datafile for the requested datatype (``filtered`` selects a filename
    variant, e.g. '' for the plain files).
    """
    build(opt)
    datatype = opt['datatype'].split(':')[0]
    filename = datatype + filtered + '.txt'
    return os.path.join(opt['datapath'], 'CornellMovie', filename)
class DefaultTeacher(FbDialogTeacher):
    """Standard Cornell Movie teacher over the unfiltered dialog files,
    using the datafile itself as the candidate source.
    """

    def __init__(self, opt, shared=None):
        # work on a copy so the caller's opt dict is left untouched
        options = copy.deepcopy(opt)
        datafile = _path(options, '')
        options['datafile'] = datafile
        options['cands_datafile'] = datafile
        super().__init__(options, shared)
class DoubleTeacher(DefaultTeacher):
    """This version creates text-label pairs from the perspective of both
    speakers.
    """

    def setup_data(self, path):
        """Adds additional perspectives.
        For example, in the conversation:
        x1 y1
        x2 y2
        x3
        Creates the additional dialog:
        '' x1
        y1 x2
        y2 x3
        And if a y3 was available in response to x3, also would have added:
        y3
        """
        def rebuild(entries):
            # Turn one episode's (text, labels) entries into the reversed
            # perspective: each label becomes the prompt for the next text.
            new_list = []
            if len(entries) > 0:
                # prepend silent input => x_0
                new_list.append(('', [entries[0][0]]))
                # add all ( y_t => x_(t+1) ) pairs
                new_list.extend([(entries[i][1][0], [entries[i + 1][0]])
                                 for i in range(len(entries) - 1)])
                if len(entries[-1]) > 1 and entries[-1][1]:
                    # add y_n => '', if last y avail
                    new_list.append((entries[-1][1][0], None))
            return new_list

        # this shows conversations in both directions: the original episode
        # is yielded unchanged, and once an episode is complete its reversed
        # version is emitted as a fresh episode.
        alternate = []
        for entry, new in super().setup_data(path):
            if new:
                # a new episode begins: flush the reversed previous episode
                for i, e in enumerate(rebuild(alternate)):
                    yield e, i == 0
                alternate.clear()
            alternate.append(entry)
            yield entry, new
        if alternate:
            # flush the reversed version of the final episode
            for i, e in enumerate(rebuild(alternate)):
                yield e, i == 0
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import codecs
import os
def create_fb_format(lines_file, convo_file, outpath):
    """Convert the Cornell Movie-Dialogs corpus to FB dialog format.

    Args:
        lines_file: path to movie_lines.txt (``id +++$+++ ... +++$+++ text``).
        convo_file: path to movie_conversations.txt, whose last field is a
            Python-list-like string of line ids.
        outpath: directory where train.txt / valid.txt / test.txt are written.

    Conversations are split 80/10/10 into train/valid/test by their ordinal
    position (every 10th goes to test, every 10th+1 to valid).
    """
    print('[building fbformat]')
    with open(os.path.join(outpath, 'train.txt'), 'w') as ftrain, \
            open(os.path.join(outpath, 'valid.txt'), 'w') as fvalid, \
            open(os.path.join(outpath, 'test.txt'), 'w') as ftest:
        lines = {}
        # Fix: read with errors='ignore' instead of globally re-registering
        # the 'strict' codec error handler (codecs.register_error mutated
        # process-wide codec state just to skip undecodable bytes).
        with open(lines_file, 'r', errors='ignore') as f:
            for line in f:
                fields = line.split(' +++$+++ ')
                # map line id -> utterance text (tabs would break the format)
                lines[fields[0]] = (
                    ' '.join(fields[4:]).strip('\n').replace('\t', ' '))
        cnt = 0
        with open(convo_file, 'r', errors='ignore') as f:
            for line in f:
                fields = line.split(' ')
                # last field is a list-like string of line ids, e.g. ['L1', 'L2']
                convo = ' '.join(fields[6:]).strip('\n').strip('[').strip(']')
                c = convo.replace("'", '').replace(' ', '').split(',')
                # forward conversation: pair consecutive utterances
                s = ''
                index = 0
                for i in range(0, len(c), 2):
                    index += 1
                    s += str(index) + ' ' + lines[c[i]]
                    if len(c) > i + 1:
                        s += '\t' + lines[c[i + 1]]
                    s += '\n'
                cnt = cnt + 1
                handle = ftrain
                if (cnt % 10) == 0:
                    handle = ftest
                if (cnt % 10) == 1:
                    handle = fvalid
                handle.write(s + '\n')
def build(opt):
    """Download the Cornell Movie-Dialogs corpus and convert it to FB dialog
    format under ``<datapath>/CornellMovie``, unless already built.
    """
    dpath = os.path.join(opt['datapath'], 'CornellMovie')
    version = None
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        fname = 'cornell_movie_dialogs_corpus.tgz'
        url = 'http://parl.ai/downloads/cornell_movie/' + fname
        build_data.download(url, dpath, fname)
        build_data.untar(dpath, fname)
        # note: the archive's top-level directory name contains spaces
        dpext = os.path.join(dpath, 'cornell movie-dialogs corpus')
        create_fb_format(os.path.join(dpext, 'movie_lines.txt'),
                         os.path.join(dpext, 'movie_conversations.txt'),
                         dpath)
        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import gzip
import os
import parlai.core.build_data as build_data
class ParseInsuranceQA(object):
    """Base class for parsing one version of the InsuranceQA dataset into
    ParlAI's FB dialog format.

    Subclasses set ``version`` and ``label2answer_fname`` and implement
    ``create_fb_format`` / ``write_data_files``.
    """

    version = None              # dataset version string, e.g. "V1"
    label2answer_fname = None   # filename of the answer-id -> answer map

    @classmethod
    def read_gz(cls, filename):
        """Return the lines of a gzipped file, decoded as utf-8."""
        # fix: use a context manager -- the original leaked the handle
        with gzip.open(filename, 'rb') as f:
            return [x.decode('utf-8') for x in f.readlines()]

    @classmethod
    def readlines(cls, path):
        """Return the lines of a plain or gzipped (``.gz``) text file."""
        if path.endswith(".gz"):
            return cls.read_gz(path)
        # fix: use a context manager -- the original leaked the handle
        with open(path) as f:
            return f.readlines()

    @classmethod
    def wids2sent(cls, wids, d_vocab):
        """Map a sequence of word ids to a space-joined sentence."""
        return " ".join([d_vocab[w] for w in wids])

    @classmethod
    def read_vocab(cls, vocab_path):
        """Read the tab-separated ``id<TAB>word`` vocabulary into a dict.

        Raises:
            ValueError: if any line does not have exactly two fields.
        """
        d_vocab = {}
        with open(vocab_path, "r") as f:
            for line in f:
                fields = line.rstrip('\n').split("\t")
                if len(fields) != 2:
                    # fix: the original swapped the format arguments,
                    # printing the line where the path belonged
                    raise ValueError(
                        "vocab file (%s) corrupted. Line (%s)" %
                        (vocab_path, repr(line))
                    )
                wid, word = fields
                d_vocab[wid] = word
        return d_vocab

    @classmethod
    def read_label2answer(cls, label2answer_path_gz, d_vocab):
        """Read the answer-id -> answer-sentence mapping file.

        Raises:
            ValueError: if any line does not have exactly two fields.
        """
        d_label_answer = {}
        for line in cls.readlines(label2answer_path_gz):
            fields = line.rstrip("\n").split("\t")
            if len(fields) != 2:
                # fix: format arguments were swapped here as well
                raise ValueError(
                    "label2answer file (%s) corrupted. Line (%s)" %
                    (label2answer_path_gz, repr(line))
                )
            aid, s_wids = fields
            d_label_answer[aid] = cls.wids2sent(s_wids.split(), d_vocab)
        return d_label_answer

    @classmethod
    def create_fb_format(cls, out_path, dtype, inpath, d_vocab, d_label_answer):
        """Convert one raw data file to FB dialog format (subclass hook)."""
        pass

    @classmethod
    def write_data_files(cls, dpext, out_path, d_vocab, d_label_answer):
        """Convert every split of this dataset version (subclass hook)."""
        pass

    @classmethod
    def build(cls, dpath):
        """Parse this dataset version under ``dpath`` into FB-format files."""
        print("building version: %s" % cls.version)
        # the root of dataset
        dpext = os.path.join(dpath, 'insuranceQA-master/%s' % cls.version)
        # read vocab file
        vocab_path = os.path.join(dpext, "vocabulary")
        d_vocab = cls.read_vocab(vocab_path)
        # read label2answer file
        label2answer_path_gz = os.path.join(dpext, cls.label2answer_fname)
        d_label_answer = cls.read_label2answer(label2answer_path_gz, d_vocab)
        # Create out path
        out_path = os.path.join(dpath, cls.version)
        build_data.make_dir(out_path)
        # Parse and write data files
        cls.write_data_files(dpext, out_path, d_vocab, d_label_answer)
class ParseInsuranceQAV1(ParseInsuranceQA):
    """Parser for InsuranceQA V1 (plain token-idx files)."""

    version = "V1"
    label2answer_fname = "answers.label.token_idx"

    @classmethod
    def write_data_files(cls, dpext, out_path, d_vocab, d_label_answer):
        """Convert the train/valid/test splits of V1."""
        data_fnames = [
            ("train", "question.train.token_idx.label"),
            ("valid", "question.dev.label.token_idx.pool"),
            ("test", "question.test1.label.token_idx.pool"),
            # ("test2", "question.test2.label.token_idx.pool")
        ]
        for dtype, data_fname in data_fnames:
            data_path = os.path.join(dpext, data_fname)
            cls.create_fb_format(out_path, dtype, data_path, d_vocab, d_label_answer)

    @classmethod
    def create_fb_format(cls, out_path, dtype, inpath, d_vocab, d_label_answer):
        """Convert one V1 split to FB dialog format.

        Train lines: ``question_wids<TAB>good_answer_ids``.
        Valid/test lines: ``good_ids<TAB>question_wids<TAB>bad_ids``.

        Raises:
            ValueError: if a line has the wrong number of fields (the
            original used ``assert``, which is stripped under ``-O`` and
            inconsistent with the V2 parser's ValueError).
        """
        print('building fbformat:' + dtype)
        # context managers so both handles are closed even on a parse error
        # (the original leaked the input file and never closed fout on error)
        with open(inpath) as fin, \
                open(os.path.join(out_path, dtype + '.txt'), 'w') as fout:
            for line in fin:
                fields = line.rstrip("\n").split("\t")
                if dtype == "train":
                    if len(fields) != 2:
                        raise ValueError(
                            "data file (%s) corrupted. Line (%s)" %
                            (inpath, repr(line))
                        )
                    s_q_wids, s_good_aids = fields
                    q = cls.wids2sent(s_q_wids.split(), d_vocab)
                    good_ans = [d_label_answer[aid_]
                                for aid_ in s_good_aids.split()]
                    # save good answers (train only)
                    fout.write('1 ' + q + '\t' + "|".join(good_ans) + '\n')
                else:
                    if len(fields) != 3:
                        raise ValueError(
                            "data file (%s) corrupted. Line (%s)" %
                            (inpath, repr(line))
                        )
                    s_good_aids, s_q_wids, s_bad_aids = fields
                    q = cls.wids2sent(s_q_wids.split(), d_vocab)
                    good_ans = [d_label_answer[aid_]
                                for aid_ in s_good_aids.split()]
                    bad_ans = [d_label_answer[aid_]
                               for aid_ in s_bad_aids.split()]
                    # save good answers and the full candidate pool
                    fout.write(
                        '1 ' + q + '\t' + "|".join(good_ans) + '\t\t' +
                        "|".join(good_ans + bad_ans) + '\n'
                    )
class ParseInsuranceQAV2(ParseInsuranceQA):
    """Parser for InsuranceQA V2 (gzipped encoded files, several candidate
    pool sizes)."""

    version = "V2"
    label2answer_fname = "InsuranceQA.label2answer.token.encoded.gz"

    @classmethod
    def write_data_files(cls, dpext, out_path, d_vocab, d_label_answer):
        """Convert every split for each candidate-pool size (100-1500)."""
        data_fnames_tmpl = [
            ("train.%s", "InsuranceQA.question.anslabel.token.%s.pool.solr.train.encoded.gz"),  # noqa: E501
            ("valid.%s", "InsuranceQA.question.anslabel.token.%s.pool.solr.valid.encoded.gz"),  # noqa: E501
            ("test.%s", "InsuranceQA.question.anslabel.token.%s.pool.solr.test.encoded.gz")  # noqa: E501
        ]
        for n_cands in [100, 500, 1000, 1500]:
            for dtype_tmp, data_fname_tmp in data_fnames_tmpl:
                dtype = dtype_tmp % n_cands
                data_fname = data_fname_tmp % n_cands
                data_path = os.path.join(dpext, data_fname)
                cls.create_fb_format(
                    out_path, dtype, data_path, d_vocab, d_label_answer
                )

    @classmethod
    def create_fb_format(cls, out_path, dtype, inpath, d_vocab, d_label_answer):
        """Convert one V2 split to FB dialog format.

        Lines: ``domain<TAB>question_wids<TAB>good_ids<TAB>bad_ids``.

        Raises:
            ValueError: if a line does not have exactly four fields.
        """
        print('building fbformat:' + dtype)
        lines = cls.readlines(inpath)
        # context manager so the output handle is closed even on a parse
        # error (the original closed it manually and leaked it on error)
        with open(os.path.join(out_path, dtype + '.txt'), 'w') as fout:
            for line in lines:
                fields = line.rstrip("\n").split("\t")
                if len(fields) != 4:
                    # fix: the original swapped the format arguments,
                    # printing the line where the path belonged
                    raise ValueError(
                        "data file (%s) corrupted. Line (%s)" %
                        (inpath, repr(line))
                    )
                _, s_q_wids, s_good_aids, s_bad_aids = fields
                q = cls.wids2sent(s_q_wids.split(), d_vocab)
                good_ans = [d_label_answer[aid_]
                            for aid_ in s_good_aids.split()]
                bad_ans = [d_label_answer[aid_]
                           for aid_ in s_bad_aids.split()]
                # save good answers and the full candidate pool
                fout.write(
                    '1 ' + q + '\t' + "|".join(good_ans) + '\t\t' +
                    "|".join(good_ans + bad_ans) + '\n'
                )
def build(opt):
    """Download the InsuranceQA github archive and parse both the V1 and V2
    datasets into FB dialog format under ``<datapath>/InsuranceQA``.
    """
    dpath = os.path.join(opt['datapath'], 'InsuranceQA')
    version = '1'
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data from github.
        fname = 'insuranceqa.zip'
        url = 'https://github.com/shuzi/insuranceQA/archive/master.zip'
        print('[downloading data from: ' + url + ']')
        build_data.download(url, dpath, fname)
        build_data.untar(dpath, fname)
        # parse both dataset versions into <dpath>/V1 and <dpath>/V2
        ParseInsuranceQAV1.build(dpath)
        ParseInsuranceQAV2.build(dpath)
        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import json
import os
import re
def parse_ans(a):
    """Parse a WebQuestions ``targetValue`` S-expression, e.g.
    ``(list (description "A") (description "B"))``, into a '|'-joined
    answer string ``A|B``.

    Fixes over the original: raw regex string (``'\\(description'`` was a
    deprecated invalid escape), no shadowing of the parameter by the loop
    variable, and no quadratic string concatenation.
    """
    # character-strip the leading "(list" token, as in the original
    stripped = a.lstrip('(list')
    pieces = [piece.strip(' ()"()')
              for piece in re.split(r'\(description', stripped)]
    # the fragment before the first "(description" strips to ''; the final
    # lstrip drops the separator(s) it contributes, as the original did
    return '|'.join(pieces).lstrip('|')
def create_fb_format(outpath, dtype, inpath):
    """Convert one WebQuestions json split into FB dialog format.

    Args:
        outpath: directory where ``<dtype>.txt`` is written.
        dtype: output split name ('train'/'valid'/'test').
        inpath: path to the json file (a list of dicts with 'utterance'
            and 'targetValue' keys).
    """
    print('building fbformat:' + dtype)
    with open(inpath) as data_file:
        data = json.load(data_file)
    # context manager so the output handle is closed even on error
    # (the original opened it manually and leaked it on exceptions)
    with open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
        for example in data:
            q = example['utterance']
            a = parse_ans(example['targetValue'])
            fout.write('1 ' + q + '\t' + a + '\n')
def build(opt):
    """Download WebQuestions and convert it to FB dialog format under
    ``<datapath>/WebQuestions``, unless already built.
    """
    dpath = os.path.join(opt['datapath'], 'WebQuestions')
    version = None
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        url = ('https://worksheets.codalab.org/rest/bundles/' +
               '0x4a763f8cde224c2da592b75f29e2f5c2/contents/blob/')
        build_data.download(url, dpath, 'train.json')
        url = ('https://worksheets.codalab.org/rest/bundles/' +
               '0xe7bac352fce7448c9ef238fb0a297ec2/contents/blob/')
        build_data.download(url, dpath, 'test.json')
        create_fb_format(dpath, 'train', os.path.join(dpath, 'train.json'))
        # NOTE(review): valid is built from train.json too -- presumably
        # because no separate dev split is distributed; confirm upstream.
        create_fb_format(dpath, 'valid', os.path.join(dpath, 'train.json'))
        create_fb_format(dpath, 'test', os.path.join(dpath, 'test.json'))
        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import json
from parlai.core.build_data import download
from parlai.core.params import ParlaiParser
from parlai.core.utils import ProgressLogger
import parlai.core.build_data as build_data
def download_images(opt):
    """Interactively download the Personality-Captions images from the
    YFCC100m S3 mirror into ``<datapath>/personality_captions/images``.

    Prompts for confirmation twice (dataset permission and download time)
    and raises RuntimeError if the user declines either prompt.
    """
    dpath = os.path.join(opt['datapath'], 'personality_captions')
    image_path = os.path.join(dpath, 'images')
    version = '1.0'
    response = input(
        'Please confirm that you have obtained permission '
        'to work with the YFCC100m dataset, as outlined by the steps '
        'listed at '
        'https://multimediacommons.wordpress.com/yfcc100m-core-dataset/ [Y/y]: ')
    if response.lower() != 'y':
        raise RuntimeError('In order to use the images from this dataset, '
                           'you must obtain permission from the webpage above.')
    response = input(
        'NOTE: This script will download each image individually from the '
        's3 server on which the images are hosted. This will take a *very '
        'long* time. Are you sure you would like to continue? [Y/y]: '
    )
    if response.lower() != 'y':
        raise RuntimeError('If you have access to the images, please specify '
                           'the path to the folder via the `--yfcc-path` '
                           'command line argument.')
    image_prefix = 'https://multimedia-commons.s3-us-west-2.amazonaws.com/data/images'
    logger = ProgressLogger(throttle=0.1, should_humanize=False)
    # collect the image hashes referenced by all three splits
    hashes = []
    for dt in ['train', 'val', 'test']:
        with open(os.path.join(dpath, '{}.json'.format(dt))) as f:
            data = json.load(f)
        hashes += [d['image_hash'] for d in data]
    os.makedirs(image_path, exist_ok=True)
    print('[downloading images to {}]'.format(image_path))
    for i, (p_hash) in enumerate(hashes):
        # images are sharded on S3 by the first three / next three hash chars
        image_url = '{}/{}/{}/{}.jpg'.format(
            image_prefix,
            p_hash[:3],
            p_hash[3:6],
            p_hash)
        download(image_url, image_path, '{}.jpg'.format(p_hash))
        logger.log(i, len(hashes))
    build_data.mark_done(image_path, version)
if __name__ == '__main__':
    # Allow running this module directly to fetch the images.
    parser = ParlaiParser()
    download_images(parser.parse_args())
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def _process(fname, fout):
with open(fname) as f:
lines = [line.strip('\n') for line in f]
# main article
s = '1 ' + lines[2]
# add question
s = s + ' ' + lines[4]
# add answer
s = s + '\t' + lines[6]
# add candidates (and strip them of the real names)
for i in range(8, len(lines)):
lines[i] = lines[i].split(':')[0]
s = s + '\t\t' + '|'.join(lines[8:])
fout.write(s + '\n\n')
def create_fb_format(outpath, dtype, inpath):
    """Write every ``*.question`` file found directly under ``inpath`` into
    a single ``<outpath>/<dtype>.txt`` file in FB dialog format.
    """
    print('building fbformat:' + dtype)
    with open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
        for entry in os.listdir(inpath):
            if not entry.endswith('.question'):
                continue
            _process(os.path.join(inpath, entry), fout)
def build(opt):
    """Download the QADailyMail data from Google Drive and convert all
    splits to FB dialog format under ``<datapath>/QADailyMail``.
    """
    version = 'v1.0'
    dpath = os.path.join(opt['datapath'], 'QADailyMail')
    if not build_data.built(dpath, version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        fname = 'qadailymail.tar.gz'
        gd_id = '0BwmD_VLjROrfN0xhTDVteGQ3eG8'
        build_data.download_from_google_drive(gd_id, os.path.join(dpath, fname))
        build_data.untar(dpath, fname)
        # the archive stores per-example .question files by split
        ext = os.path.join('dailymail', 'questions')
        create_fb_format(dpath, 'train', os.path.join(dpath, ext, 'training'))
        create_fb_format(dpath, 'valid', os.path.join(dpath, ext, 'validation'))
        create_fb_format(dpath, 'test', os.path.join(dpath, ext, 'test'))
        # Mark the data as built.
        build_data.mark_done(dpath, version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Teachers for the MovieDialog task.
From Dodge et al. '15. Link: https://arxiv.org/abs/1511.06931
Task 1: Closed-domain QA dataset asking templated questions about movies,
answerable from Wikipedia.
Task 2: Questions asking for movie recommendations.
Task 3: Dialogs discussing questions about movies as well as recommendations.
Task 4: Dialogs discussing Movies from Reddit (the /r/movies SubReddit).
"""
from parlai.core.teachers import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
import copy
import os
# Map task number -> relative file-name prefix of that task's data files.
tasks = {
    1: os.path.join('task1_qa', 'task1_qa_pipe_'),
    2: os.path.join('task2_recs', 'task2_recs_'),
    3: os.path.join('task3_qarecs', 'task3_qarecs_pipe_'),
    4: os.path.join('task4_reddit', 'task4_reddit',
                    'task4_reddit_pipeless_'),
}
def _path(task, opt):
    """Return ``(datafile, candidates_file)`` for the given task number."""
    # Build the data if it doesn't exist.
    build(opt)
    dt = opt['datatype'].split(':')[0]
    # File suffix per datatype; unknown datatypes get an empty suffix.
    suffix = {'train': 'train', 'test': 'test', 'valid': 'dev'}.get(dt, '')
    datafile = os.path.join(
        opt['datapath'], 'MovieDialog', 'movie_dialog_dataset',
        '{t}{s}.txt'.format(t=tasks[int(task)], s=suffix))
    if int(task) == 4:
        # Task 4 ships per-split candidate files; none exist for training.
        if dt == 'train':
            candpath = None
        else:
            candpath = datafile.replace(
                suffix + '.txt', 'cand-{dt}.txt'.format(dt=dt))
    else:
        # Tasks 1-3 share the global entity list as candidates.
        candpath = os.path.join(opt['datapath'], 'MovieDialog',
                                'movie_dialog_dataset', 'entities.txt')
    return datafile, candpath
# The knowledge base of facts that can be used to answer questions.
class KBTeacher(FbDialogTeacher):
    """Simple text entry with each movie's facts in the knowledge base."""

    def __init__(self, opt, shared=None):
        """Initialize teacher."""
        build(opt)
        kb_file = os.path.join(opt['datapath'], 'MovieDialog',
                               'movie_dialog_dataset', 'movie_kb.txt')
        opt['datafile'] = kb_file
        super().__init__(opt, shared)
# Single task.
class TaskTeacher(FbDialogTeacher):
    """Teacher with single task, specified by moviedialog:task:N."""

    def __init__(self, opt, shared=None):
        """Initialize teacher."""
        # expecting "moviedialog:task:N"; fall back to task 1 when the
        # task number component is missing
        spec = opt['task'].split(':')
        self.task = spec[2] if len(spec) > 2 else '1'
        opt['datafile'], opt['cands_datafile'] = _path(self.task, opt)
        super().__init__(opt, shared)
# By default train on all tasks at once.
class DefaultTeacher(MultiTaskTeacher):
    """By default will load teacher with all four tasks."""

    def __init__(self, opt, shared=None):
        """Initialize teacher with a comma-separated spec covering tasks 1..4."""
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join('moviedialog:Task:%d' % (i + 1)
                               for i in range(len(tasks)))
        super().__init__(opt, shared)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
import copy
import os
def _path(exsz, task, opt, dt=''):
    """Return the path to qa<task>_<dt>.txt inside the en-valid{exsz}-nosf data.

    :param exsz: '' for the 1k variant or '-10k' for the 10k variant
    :param dt: datatype override; defaults to opt['datatype']'s first component
    """
    # Build the data if it doesn't exist.
    build(opt)
    if dt == '':
        dt = opt['datatype'].split(':')[0]
    folder = 'en-valid{exsz}-nosf'.format(exsz=exsz)
    filename = 'qa{task}_{type}.txt'.format(task=task, type=dt)
    return os.path.join(opt['datapath'], 'bAbI', 'tasks_1-20_v1-2',
                        folder, filename)
def mod_labels(ys, task):
    """Replace commas in multi-part bAbI labels with spaces.

    This is more friendly to our tokenizer, which makes commas full tokens;
    this way models won't be penalized for not generating a comma.
    Applies to task 8 (holding, e.g. 'milk,cookies,football') and
    task 19 (pathfinding, e.g. 'n,e').
    """
    if ys is None:
        return ys
    if task in ('8', '19'):
        ys = [y.replace(',', ' ') for y in ys]
    return ys
# Single bAbI task (1k training).
class Task1kTeacher(FbDialogTeacher):
    """Teacher for one 1k-example bAbI task, selected via babi:Task1k:N."""

    def __init__(self, opt, shared=None):
        task = opt.get('task', 'babi:Task1k:1')
        self.task_num = task.split(':')[2]
        opt['datafile'] = _path('', self.task_num, opt)
        # Candidates always come from the training split.
        opt['cands_datafile'] = _path('', self.task_num, opt, 'train')
        super().__init__(opt, shared)

    def setup_data(self, path):
        # Strip commas from multi-part labels before yielding each example.
        for entry, new in super().setup_data(path):
            entry[1] = mod_labels(entry[1], self.task_num)
            yield entry, new

    def load_cands(self, path):
        return mod_labels(super().load_cands(path), self.task_num)
# Single bAbI task (10k training).
class Task10kTeacher(FbDialogTeacher):
    """Teacher for one 10k-example bAbI task, selected via babi:Task10k:N."""

    def __init__(self, opt, shared=None):
        task = opt.get('task', 'babi:Task10k:1')
        self.task_num = task.split(':')[2]
        opt['datafile'] = _path('-10k', self.task_num, opt)
        # Candidates always come from the training split.
        opt['cands_datafile'] = _path('-10k', self.task_num, opt, 'train')
        super().__init__(opt, shared)

    def setup_data(self, path):
        # Strip commas from multi-part labels before yielding each example.
        for entry, new in super().setup_data(path):
            entry[1] = mod_labels(entry[1], self.task_num)
            yield entry, new

    def load_cands(self, path):
        return mod_labels(super().load_cands(path), self.task_num)
# By default train on all tasks at once.
class All1kTeacher(MultiTaskTeacher):
    """Multi-task teacher covering all twenty 1k bAbI tasks."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        task_names = ['babi:Task1k:%d' % n for n in range(1, 21)]
        opt['task'] = ','.join(task_names)
        super().__init__(opt, shared)
# By default train on all tasks at once.
class All10kTeacher(MultiTaskTeacher):
    """Multi-task teacher covering all twenty 10k bAbI tasks."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        task_names = ['babi:Task10k:%d' % n for n in range(1, 21)]
        opt['task'] = ','.join(task_names)
        super().__init__(opt, shared)
# By default train on all tasks at once (1k variant).
class DefaultTeacher(All1kTeacher):
    """Alias: the default bAbI teacher is the all-tasks 1k teacher."""
    pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import DialogTeacher
from .build import build
import os
import copy
import csv
import glob
def _path(opt):
    """Build the data if needed and return the split's directory path.

    :raises RuntimeError: when opt['datatype'] is not train/valid/test.
    """
    build(opt)
    dt = opt['datatype'].split(':')[0]
    if dt not in ('train', 'valid', 'test'):
        raise RuntimeError('Not valid datatype.')
    return os.path.join(opt['datapath'], 'NarrativeQA', 'narrative_qa', dt)
class SummariesTeacher(DialogTeacher):
    """Teacher pairing each document's plot summary with its QA pairs."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        opt['datafile'] = _path(opt)
        self.id = 'NarrativeQA'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield (question, [answer1, answer2]) pairs; the first question of
        each document is prefixed with the tokenized summary."""
        print('loading data from: ' + path)
        qa_path = os.path.join(path, 'qaps.csv')
        summaries_path = os.path.join(path, 'summaries.csv')

        # Group QA rows by their document id.
        qa_pairs = {}
        with open(qa_path, 'r') as f:
            for row in csv.DictReader(f):
                qa_pairs.setdefault(row['document_id'], []).append(row)

        with open(summaries_path, 'r') as f:
            for row in csv.DictReader(f):
                info = 'Summary: %s' % row['summary_tokenized']
                for i, qa in enumerate(qa_pairs[row['document_id']]):
                    question = qa['question_tokenized']
                    answers = [qa['answer1_tokenized'], qa['answer2_tokenized']]
                    if i == 0:
                        # Prepend start info in first question
                        yield (info + '\n' + question, answers), True
                    else:
                        yield (question, answers), False
class DefaultTeacher(DialogTeacher):
    """Teacher pairing each document's full story text with its QA pairs."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        data_path = _path(opt)
        opt['datafile'] = data_path
        self.id = 'NarrativeQA'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield (question, [answer1, answer2]) pairs; the first question of
        each document is prefixed with the story text and its metadata.

        Documents whose story file is missing on disk are skipped.
        """
        print('loading data from: ' + path)
        qa_path = os.path.join(path, 'qaps.csv')
        documents_path = os.path.join(path, 'documents.csv')
        stories_base_path = os.path.join(path, '..', 'stories')

        qa_pairs = dict()

        print("%s stories found." %
              len(glob.glob(os.path.join(stories_base_path, "*.content"))))

        # Group QA rows by their document id.
        with open(qa_path, 'r') as f:
            for row in csv.DictReader(f):
                qa_pairs.setdefault(row['document_id'], []).append(row)

        with open(documents_path, 'r') as f:
            for row in csv.DictReader(f):
                story_path = os.path.join(stories_base_path,
                                          row['document_id'] + '.content')
                if not os.path.exists(story_path):
                    continue

                # Use a distinct handle name: the original reused `f` here,
                # shadowing the csv file handle of the enclosing `with`.
                with open(story_path, 'r', encoding='utf-8',
                          errors='ignore') as story_file:
                    story = story_file.read().strip()

                info = 'Title: %s' % row['wiki_title']
                info += '\nKind: %s' % row['kind']
                info += '\nStory url: %s' % row['story_url']
                info += '\nStory start: %s' % row['story_start']
                info += '\nStory end: %s' % row['story_end']
                info += '\nStory: %s' % story

                for i, qa in enumerate(qa_pairs[row['document_id']]):
                    question = qa['question_tokenized']
                    answer1 = qa['answer1_tokenized']
                    answer2 = qa['answer2_tokenized']
                    if i == 0:
                        # Prepend start info in first question
                        yield (info + '\n' + question,
                               [answer1, answer2]), True
                    else:
                        yield (question, [answer1, answer2]), False
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import glob
import gzip
import multiprocessing
import os
import re
import sys
import time
import xml.etree.ElementTree as ET
from parlai.core.utils import ProgressLogger
# Expected corpus sizes, used as sanity checks after extraction.
NUM_MOVIE_FOLDERS = 140044
NUM_SUBTITLES_FILES = 446612
# Two subtitle lines further apart than this (seconds) start a new dialog.
MAX_TIME_DIFFERENCE_S = 2
# Accepted utterance length bounds, in tokens.
MIN_WORD_LENGTH = 2
MAX_WORD_LENGTH = 20
# remove brackets
CLEAN_BRACKETS_REGEX = re.compile(
    '<!--.*?-->|<[^>]*>|\([^\)]*\)|\[[^\]]*\]|\{[^\}]*\}|##|~'
)
# Usually, unbalanced brackets correspond to very noisy sentences
# '#' is usually pretty bad and means lyrics of the song
BRACKETS_CHARACTERS = ['[', ']', '(', ')', '{', '}', '<', '>', '#']
MULTI_WHITESPACES_REGEX = re.compile(r'\s+')
# Existing apostrophe tokenization in Open Subtitles is not compatible with nltk
# Each entry is (pattern, replacement), applied in order.
APOSTROPHE_REPLACEMENT_REGEX = [
    (re.compile(r"(\s?)n(\s?)'(\s?)t(\s|$)"), "\\1n't\\4"),
    (re.compile(r"'(\s?)(s|re|em|im|bout|cause|ve|d|ll|ne)(\s+|$)"), " '\\2\\3"),
    # it's a common (in OpenSubtitles) spelling error to use 'il instead of 'll
    (re.compile(r"'(\s?)il(\s|$)"), " 'll\\2"),
    (re.compile(r"(\s|^)i(\s?)'(\s?)(m|mm)(\s|$)"), "\\1i 'm\\5"),
    (re.compile(r"in(\s?)'(\s|$)"), "ing\\2"),
    (re.compile(r"(\s|^)ma(\s?)'(\s?)am(\s|$)"), "\\1ma'am\\4"),
    (re.compile(r"(\s|^)c(\s?)'(\s?)mon(\s|$)"), "\\1c'mon\\4"),
    (re.compile(r"(\s|^)o(\s?)'(\s?)clock(\s|$)"), "\\1o'clock\\4"),
    (re.compile(r"(\s|^)y(\s?)'(\s?)all(\s|$)"), "\\1y'all\\4"),
]
# Some cleaning steps are taken from
# NOTE(review): the attribution above was left unfinished by the author.
CLEANUP_REGEX_RULES = [
    # remove speaker tag "xxx: "
    (re.compile(r'^\s*[A-z]*\s*:'), ''),
    # remove unnecessary symbols
    (re.compile(r"-{2,}"), ' '),
    # delete a space right before a period for titles
    (re.compile(r'(?<=( mr| jr| ms| dr| st|mrs)) \.'), '. '),
]
# Literal (old, new) substring replacements applied after the regex rules.
CLEANUP_REPLACE_RULES = [
    ('"', ' '),
    ("``", " "),
    ("''", " "),
    ("% %", " "),
    ("i̇", "i"),
]
def get_movie_id(filename_path):
    """Extract the integer movie id from a path of the form
    .../<movie_id>/<subtitle file>."""
    parent_dir = os.path.dirname(filename_path)
    return int(os.path.basename(parent_dir))
# OpenSubtitles contains several subtitle files per movie,
# stored in separate folders.
# We gather all subtitle files based on the movie they correspond to
# and apply deduplication for the extracted replicas.
def get_list_of_files(top_path):
    """Walk ``top_path`` and return {movie_id: [subtitle .xml.gz paths]}."""
    result = {}
    for path, _dirs, files in os.walk(top_path):
        for filename in files:
            if not filename.endswith('.xml.gz'):
                continue
            full_filename = os.path.realpath(os.path.join(path, filename))
            assert os.path.isfile(full_filename), 'Bad file ' + full_filename
            result.setdefault(get_movie_id(full_filename),
                              []).append(full_filename)
    return result
def parse_xml(filepath):
    """Parse an XML file, transparently handling gzip-compressed input
    (detected by a ``.gz`` extension)."""
    if os.path.splitext(filepath)[1] == '.gz':
        with gzip.open(filepath, 'r') as handle:
            return ET.parse(handle)
    return ET.parse(filepath)
def normalize_whitespaces(sentence):
    """Collapse whitespace runs to single spaces and strip the ends."""
    collapsed = MULTI_WHITESPACES_REGEX.sub(' ', sentence)
    return collapsed.strip()
def normalize_apostrophe(sentence):
    """Re-tokenize apostrophe constructions (n't, 's, 'll, ...) into the
    nltk-compatible form, applying each replacement rule in order."""
    sentence = normalize_whitespaces(sentence)
    for pattern, replacement in APOSTROPHE_REPLACEMENT_REGEX:
        sentence = pattern.sub(replacement, sentence)
    return sentence
def clean_text(words):
    """Join subtitle tokens into one cleaned lowercase sentence.

    Returns the cleaned sentence string, or None when the line should be
    discarded (speaker-tag line, leftover brackets, unmatched quotes, or a
    token count outside [MIN_WORD_LENGTH, MAX_WORD_LENGTH]).
    """
    # A trailing ':' means this line is just a speaker tag, not dialog.
    if len(words) > 0 and words[-1] == ':':
        return None
    sentence = ' '.join(words).strip(' -').lower()
    sentence = CLEAN_BRACKETS_REGEX.sub('', sentence)
    # Any bracket character surviving the regex means unbalanced brackets;
    # such sentences are usually too noisy to keep.
    if len([ch for ch in BRACKETS_CHARACTERS if ch in sentence]) > 0:
        return None
    # Un-escape backslash-escaped apostrophes.
    sentence = sentence.replace('\\\'', '\'')
    if sentence.count('"') % 2 == 1:
        # There are unmatched double-quotes.
        # Usually, it means a quote got splitted into separate utterances,
        # so it's bad example of a dialog
        return None
    sentence = normalize_apostrophe(sentence)
    # Order matters: regex rules run before the literal replacements.
    for (regex, replacement) in CLEANUP_REGEX_RULES:
        sentence = regex.sub(replacement, sentence)
    for (pattern, replacement) in CLEANUP_REPLACE_RULES:
        sentence = sentence.replace(pattern, replacement)
    words = normalize_whitespaces(sentence).split()
    # Keep only sentences with at least one word character and an
    # acceptable token count.
    if (
        len(words) > 0 and
        any(map(lambda k: re.search(r'\w', k) is not None, words)) and
        len(words) >= MIN_WORD_LENGTH and
        len(words) <= MAX_WORD_LENGTH
    ):
        return ' '.join(words)
    else:
        return None
def parse_time_str(time_value_str):
    """Parse an SRT-style 'HH:MM:SS,mmm' timestamp into whole seconds.

    Milliseconds are discarded. Returns None for a malformed or missing
    timestamp string.
    """
    well_formed = (
        time_value_str is not None
        and len(time_value_str) == 12
        and time_value_str[2] == ':'
        and time_value_str[5] == ':'
        and time_value_str[8] == ','
    )
    if not well_formed:
        return None
    try:
        hours = int(time_value_str[0:2])
        minutes = int(time_value_str[3:5])
        seconds = int(time_value_str[6:8])
    except ValueError:
        return None
    return hours * 3600 + minutes * 60 + seconds
def extract_data_from_xml(xml_object):
    """Yield conversations (lists of >=2 cleaned sentences) from a parsed
    subtitle XML tree.

    Consecutive sentences belong to one conversation while the gap between
    them stays within MAX_TIME_DIFFERENCE_S seconds; a larger gap (or a
    rejected sentence) flushes the current conversation and starts a new one.
    """
    # Start far in the past so the very first sentence never chains onto a
    # previous conversation.
    previous_end_time = -1000
    conversation = []
    for sentence_node in xml_object.getroot():
        if sentence_node.tag != 's':
            continue
        words = []
        start_time, end_time = None, None
        for node in sentence_node:
            if node.tag == 'time':
                time_value = parse_time_str(node.get('value'))
                if time_value is None:
                    continue
                # Time ids end in 'S' (start) or 'E' (end); a sentence may
                # carry several, so keep the min start and max end.
                if node.get('id')[-1] == 'S':
                    start_time = (
                        time_value if start_time is None
                        else min(time_value, start_time)
                    )
                elif node.get('id')[-1] == 'E':
                    end_time = (
                        time_value if end_time is None
                        else max(time_value, end_time)
                    )
                else:
                    raise Exception('Unknown time-id for node: %s' % node)
            elif node.tag == 'w':
                if node.text is not None and len(node.text) > 0:
                    words.append(node.text)
            else:
                # Other tags carry no dialog content; ignore them.
                pass
        sentence = clean_text(words)
        # Fall back to the previous end time when a timestamp is absent.
        start_time = start_time or previous_end_time
        end_time = end_time or previous_end_time
        # add to the conversation
        # flush and start new conversation
        if (sentence is not None and
                start_time - previous_end_time <= MAX_TIME_DIFFERENCE_S):
            conversation.append(sentence)
        else:
            if len(conversation) > 1:
                yield conversation
            conversation = []
            if sentence is not None:
                conversation.append(sentence)
        previous_end_time = max(start_time, end_time)
def conversation_to_fb_format(conversation):
    """Render a conversation as numbered fbformat lines: each line holds one
    'question<TAB>answer' pair; an odd trailing sentence gets its own line."""
    assert len(conversation) > 1
    lines = []
    turn = 1
    for i in range(0, len(conversation), 2):
        if i + 1 < len(conversation):
            lines.append('%d %s\t%s' % (turn, conversation[i],
                                        conversation[i + 1]))
        else:
            lines.append('%d %s' % (turn, conversation[i]))
        turn += 1
    return '\n'.join(lines)
def conversation_to_basic_format(conversation):
    """Render every adjacent sentence pair as an independent single-turn
    '1 question<TAB>answer' line (no dialog history)."""
    assert len(conversation) > 1
    pairs = [
        '1 %s\t%s' % (conversation[i], conversation[i + 1])
        for i in range(len(conversation) - 1)
    ]
    return '\n'.join(pairs)
class DataProcessor(object):
    """Callable that turns one movie's subtitle files into deduplicated
    dialog text; designed for use with multiprocessing.Pool.imap."""

    def __init__(self, use_history):
        # True -> multi-turn fbformat output; False -> single-turn pairs.
        self.use_history = use_history

    def __call__(self, movie_id_with_files):
        movie_id, files = movie_id_with_files
        # A set deduplicates identical conversations across the movie's
        # multiple subtitle versions.
        unique_dialogs = set()
        for filepath in files:
            try:
                xml_object = parse_xml(filepath)
                for conversation in extract_data_from_xml(xml_object):
                    if self.use_history:
                        formatted = conversation_to_fb_format(conversation)
                    else:
                        formatted = conversation_to_basic_format(conversation)
                    unique_dialogs.add(formatted)
            except ET.ParseError:
                # TODO: We possibly can log these errors,
                # but I'm not sure how it would intervene with the ProgressLogger
                pass
            except Exception:
                print(
                    'Unexpected error for file %s:\n%s' % (
                        filepath, sys.exc_info()[0]),
                    file=sys.stderr,
                )
                raise
        if unique_dialogs:
            return '\n'.join(unique_dialogs) + '\n'
        return ''
def create_fb_format(inpath, outpath, use_history):
    """Extract dialogs from every subtitle file under ``inpath`` and write
    train/valid/test splits (roughly 80/10/10 by movie) into ``outpath``.

    :param inpath: root of the untarred OpenSubtitles XML tree
    :param outpath: directory receiving train.txt / valid.txt / test.txt
    :param use_history: True -> multi-turn fbformat; False -> single-turn pairs
    """
    print('[building fbformat]')
    start_time = time.time()

    movie_dirs = get_list_of_files(inpath)
    total_movie_dirs = len(movie_dirs)
    total_files = sum(len(file_list) for file_list in movie_dirs.values())
    print(
        '[Found %d movie folders and %d subtitles within %s in %d seconds]' % (
            total_movie_dirs,
            total_files,
            inpath,
            time.time() - start_time,
        )
    )
    assert total_movie_dirs == NUM_MOVIE_FOLDERS, 'Incorrect number of movies'
    assert total_files == NUM_SUBTITLES_FILES, 'Incorrect number of files'

    processor = DataProcessor(use_history)
    logger = ProgressLogger()
    # `with` guarantees the split files and the worker pool are released even
    # when a worker raises (the original leaked the three file handles).
    with open(os.path.join(outpath, 'train.txt'), 'w') as ftrain, \
            open(os.path.join(outpath, 'valid.txt'), 'w') as fvalid, \
            open(os.path.join(outpath, 'test.txt'), 'w') as ftest, \
            multiprocessing.Pool(processes=os.cpu_count()) as pool:
        for i, s in enumerate(pool.imap(processor, movie_dirs.items())):
            handle = ftrain
            # TODO: Shall we use smaller valid/test sets? Even 10% is A LOT here
            if i % 10 == 0:
                handle = ftest
            if i % 10 == 1:
                handle = fvalid
            handle.write(s)
            # NOTE(review): `i` counts movies while the total is the file
            # count, so the displayed progress is off; kept as-is to
            # preserve existing behavior.
            logger.log(i, total_files)
    print(
        '[Data has been successfully extracted in %d seconds]' % (
            time.time() - start_time,
        )
    )
def build(datapath, use_history):
    """Download and build the OpenSubtitles2018 dataset (idempotent).

    :param datapath: ParlAI data root directory
    :param use_history: whether dialogs keep multi-turn history; the two
        variants are built into separate directories
    :return: the directory the data was built into
    """
    dpath = os.path.join(datapath, 'OpenSubtitles2018')
    if not use_history:
        dpath += '_no_history'
    version = '1'

    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)

        untar_path = os.path.join(dpath, 'OpenSubtitles2018', 'xml', 'en')

        # Only re-download when the extracted tree looks incomplete.
        if len(glob.glob(untar_path + '/*/*/*.xml.gz')) != NUM_SUBTITLES_FILES:
            # Download the data.
            url = (
                'http://opus.lingfil.uu.se/download.php?f=OpenSubtitles2018/en.tar.gz'
            )
            build_data.download(url, dpath, 'OpenSubtitles2018.tar.gz')
            build_data.untar(dpath, 'OpenSubtitles2018.tar.gz')

        create_fb_format(untar_path, dpath, use_history)

        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
    return dpath
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.