# NOTE: extraction artifact — a dataset table header ("code | path | quality_prob | ...")
# was fused into this dump; it is not part of any source module and has been
# neutralized as comments. The dump concatenates several unrelated Python files.
import boto3, datetime, re, time, json
from dataclasses import dataclass
from typing import Union, Optional
from RW.AWS.mixins.AWSAuthenticationMixin import AWSAuthenticationMixin
class CloudFormation(AWSAuthenticationMixin):
    """
    AWS CloudFormation keyword library for integrating with AWS CloudFormation.

    All keywords obtain a boto3 CloudFormation client through the
    authentication mixin's ``get_client`` helper.
    """
    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def get_stack_events(self, stack_name):
        """
        Gets a list of stack events under a name as json.

        :param stack_name: name (or unique ID) of the CloudFormation stack.
        Examples:
        | RW.AWS.CloudFormation.Get Stack Events | MyStackName |
        Return Value:
        | stack_event_info: json |
        """
        client = self.get_client("cloudformation")
        rsp = client.describe_stack_events(StackName=stack_name)
        events = rsp["StackEvents"]
        while "NextToken" in rsp:
            # Bug fix: the continuation call previously omitted StackName, so
            # subsequent pages were no longer scoped to the requested stack.
            rsp = client.describe_stack_events(StackName=stack_name, NextToken=rsp["NextToken"])
            events.extend(rsp["StackEvents"])
        return events

    def get_all_stack_events(self, event_status="", seconds_in_past=None):
        """
        Gets stack events across all stacks and filters them.

        :param event_status: optional ResourceStatus to keep (e.g. CREATE_COMPLETE);
            empty string keeps all statuses.
        :param seconds_in_past: optional window; events older than this many
            seconds are dropped.
        Examples:
        | RW.AWS.CloudFormation.Get All Stack Events | CREATE_COMPLETE | 600 |
        Return Value:
        | a list containing one (possibly filtered) event list per stack |
        """
        stacks = [s["StackName"] for s in self.get_stack_summaries()]
        events = []
        for s in stacks:
            stack_events = self.get_stack_events(s)
            if event_status:
                stack_events = self.filter_stack_events_by_status(stack_events, event_status)
            if seconds_in_past:
                stack_events = self.filter_stack_events_by_time(stack_events, seconds_in_past)
            # NOTE: intentionally `append` (not `extend`) — callers receive a
            # nested list grouped per stack; kept for backward compatibility.
            events.append(stack_events)
        return events

    def get_stack_summaries(self):
        """
        Get a list of summaries for each stack, following pagination.
        Examples:
        | RW.AWS.CloudFormation.Get Stack Summaries |
        Return Value:
        | stack_summaries: json |
        """
        client = self.get_client("cloudformation")
        rsp = client.list_stacks()
        summaries = rsp["StackSummaries"]
        while "NextToken" in rsp:
            rsp = client.list_stacks(NextToken=rsp["NextToken"])
            summaries.extend(rsp["StackSummaries"])
        return summaries

    def filter_stack_events(self, events, event_status):
        """
        *DEPRECATED* Use `Filter Stack Events By Status` instead.
        Filters out stack events which do not match the status.
        """
        # Delegate so both keywords stay in lockstep.
        return self.filter_stack_events_by_status(events, event_status)

    def filter_stack_events_by_status(self, events, event_status):
        """
        Filters out stack events which do not match the status.

        :param events: list of stack-event dicts as returned by boto3.
        :param event_status: exact ResourceStatus string to keep.
        """
        filtered_events = [e for e in events if e["ResourceStatus"] == event_status]
        return filtered_events

    def filter_stack_events_by_time(self, events, seconds_in_past):
        """
        Filters out stack events older than seconds back in the past provided.

        :param events: list of stack-event dicts; each ``Timestamp`` is a
            datetime (boto3 returns these tz-aware; tzinfo is stripped so the
            comparison against the naive UTC cutoff is valid).
        :param seconds_in_past: size of the retention window in seconds.
        """
        utc_today = datetime.datetime.utcnow()
        utc_past = utc_today - datetime.timedelta(seconds=int(seconds_in_past))
        filtered_events = [e for e in events if e["Timestamp"].replace(tzinfo=None) >= utc_past]
        return filtered_events

    def json_stringify(self, response_data):
        """
        Helper method for platform compatibility. Checks data is safe to load as json.
        If this method encounters a type it cannot understand, it changes it to a
        string (via ``default=str``) before round-tripping through json.
        """
        response_json = json.dumps(response_data, default=str)
        response_data = json.loads(response_json)
        return response_data
import json
import urllib
import re
import dateutil.parser
import google.auth.transport.requests
from datetime import datetime, timezone
from dataclasses import dataclass
from google.oauth2 import service_account
from google.cloud import monitoring_v3, logging
from google.protobuf.json_format import MessageToDict
from typing import Optional
from RW.Utils import parse_timedelta
from RW import platform, Prometheus
class OpsSuite:
    """
    Google Cloud Operations Suite keyword library.

    Wraps service-account authentication plus Cloud Monitoring (MQL) queries
    and Cloud Logging reads.
    """
    # TODO: move helpers to utils
    # TODO: update docstrings
    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(self):
        # Populated by `authenticate`; None until then.
        self._credentials = None

    def authenticate(self, service_account_json: platform.Secret):
        """
        Sets the google service account credentials from a platform secret containing json.
        - ``service_account_json`` the secret containing a json string from a google account credentials file.
        Examples:
        | RW.GCP.OpsSuite.Set Opssuite Credentials | ${opssuite_sa_creds}
        """
        if not service_account_json:
            raise ValueError("service_account is empty")
        sa = json.loads(service_account_json.value, strict=False)
        self._credentials = service_account.Credentials.from_service_account_info(sa)

    def get_credentials(self) -> object:
        """
        Return the credentials set by `authenticate` (None if not yet set).

        :return: The credentials
        """
        return self._credentials

    def get_token(self, gcp_credentials: platform.Secret = None) -> platform.Secret:
        """
        Retrieve short lived bearer token from service account authentication in the form of a platform secret.
        Examples:
        | RW.GCP.OpsSuite.Get Token | gcp_credentials=${ops-suite-sa}
        Return Value:
        | A secret in the form of key=token value=access_token, good for 3600s. |
        """
        if not gcp_credentials:
            raise ValueError("service_account is empty")
        sa = json.loads(gcp_credentials.value, strict=False)
        creds = service_account.Credentials.from_service_account_info(sa, scopes=['https://www.googleapis.com/auth/cloud-platform'])
        # See https://cloud.google.com/docs/authentication/token-types#access-contents
        # Access tokens are by default good for 1 hour / 3600 seconds
        # https://github.com/googleapis/google-auth-library-python/blob/main/google/oauth2/service_account.py
        request = google.auth.transport.requests.Request()
        creds.refresh(request)
        return platform.Secret(key="token", val=creds.token)

    def get_access_token_header(self, gcp_credentials: platform.Secret = None) -> platform.Secret:
        """
        Retrieve an access token header with a short lived bearer token from service account
        authentication in the form of a platform secret.
        Examples:
        | RW.GCP.OpsSuite.Get Access Token Header | gcp_credentials=${ops-suite-sa}
        Return Value:
        | A secret in the form of key=optional_headers value='{"Authorization": "Bearer [token]"}', good for 3600s. |
        """
        access_token = self.get_token(gcp_credentials)
        access_token_header = {"Authorization": "Bearer {}".format(access_token.value)}
        return platform.Secret(key="optional_headers", val=json.dumps(access_token_header))

    def run_mql(self, project_name, mql_statement, sort_most_recent=True):
        """
        *DEPRECATED*
        Runs a MQL statement against a project ID in Google cloud, and returns a timeseries of monitoring data.
        - ``project_name`` the Google Cloud Project ID
        - ``mql_statement`` is the MQL statement to execute.
        ``tip:`` you can play with a MQL statement in the Google Cloud Console and paste it into the SLI config.
        Examples:
        | RW.GCP.OpsSuite.Run Mql | ${PROJECT_ID} | ${MQL_STATEMENT}
        Return Value:
        | response dict |
        """
        client = monitoring_v3.QueryServiceClient(credentials=self.get_credentials())
        request = monitoring_v3.QueryTimeSeriesRequest(
            name=f"projects/{project_name}",
            query=mql_statement,
        )
        page_result = client.query_time_series(request=request)
        rsp = [type(r).to_dict(r) for r in page_result]
        return rsp

    def metric_query(self, project_name, mql_statement, no_result_overwrite, no_result_value, gcp_credentials: platform.Secret = None, sort_most_recent=True):
        """
        Runs a MQL statement against a project ID in Google cloud, and returns a single metric value.
        - ``project_name`` the Google Cloud Project ID
        - ``mql_statement`` is the MQL statement to execute.
        - ``no_result_overwrite`` the literal string 'Yes' enables substituting
          ``no_result_value`` when the query returns no rows.
        - ``no_result_value`` integer fallback used when enabled and the result is empty.
        ``tip:`` you can play with a MQL statement in the Google Cloud Console and paste it into the SLI config.
        Examples:
        | RW.GCP.OpsSuite.Run Mql | ${PROJECT_ID} | ${MQL_STATEMENT} |
        Return Value:
        | metric value |
        """
        if gcp_credentials:
            self.authenticate(gcp_credentials)
        client = monitoring_v3.QueryServiceClient(credentials=self.get_credentials())
        request = monitoring_v3.QueryTimeSeriesRequest(
            name=f"projects/{project_name}",
            query=mql_statement,
        )
        page_result = client.query_time_series(request=request)
        rsp = [type(r).to_dict(r) for r in page_result]  # convert protobuf rsp to dict
        # NOTE: 'Yes' is a stringly-typed flag coming from platform config; kept
        # as-is for backward compatibility.
        if no_result_overwrite == 'Yes':
            if not rsp:
                metric = int(no_result_value)
            else:
                metric = self._extract_metric_from_mql_result(rsp, sort_most_recent)
        else:
            metric = self._extract_metric_from_mql_result(rsp, sort_most_recent)
        return metric

    def _extract_metric_from_mql_result(
        self,
        metric_query_result: dict,
        sort_most_recent: bool,
        data_key="point_data",
    ):
        """
        Pull a single scalar value out of the first series in an MQL result set.

        :param metric_query_result: MQL result converted to a list of dicts.
        :param sort_most_recent: when True, pick the data point with the newest
            ``end_time``; otherwise take the first point as returned.
        :raises ValueError: when the series contains no data points.
        """
        # TODO: convert to an extract/parse strategy
        metric_data = metric_query_result[0][data_key]
        if len(metric_data) == 0:
            raise ValueError(f"The MQL result set has 0 results: {metric_query_result}")
        if sort_most_recent:
            # Bug fix: previously sorted ascending and took index 0, which
            # selected the OLDEST point; reverse=True makes index 0 the newest.
            metric_data = sorted(metric_data, key=lambda d: dateutil.parser.parse(d["time_interval"]["end_time"]), reverse=True)
        # first access mql result array, access values list, get 0th entry which has/can be sorted to most recent
        # then get dict values so we don't need to check keys, cast values to list and get 0th
        metric = list(metric_data[0]["values"][0].values())[0]
        # first attempt regular format
        try:
            metric = format(float(metric), "f")
        except (TypeError, ValueError):
            # Bug fix: was a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            # TODO: log exception before continuing
            # remove alpha characters from value (e.g. units) and assume float cast
            metric = float(''.join(i for i in str(metric) if i.isdigit() or i in ['.', '-']))
        return metric

    def get_last_point_in_series_set(self, mql_result, label_key="label_values", data_key="point_data"):
        """
        *DEPRECATED*
        Removes all data points except the most recent for each instance in the MQL result set.
        - ``mql_result`` the results from an MQL statement
        Examples:
        | RW.GCP.OpsSuite.Get Last Point In Series Set | ${rsp}
        Return Value:
        | results dict |
        """
        parsed_points_set = []
        for series in mql_result:
            if series[data_key]:
                # Index 0 is assumed to be the latest point — TODO confirm MQL
                # result ordering for callers of this deprecated helper.
                parsed_points_set.append({label_key: series[label_key], data_key: series[data_key][0]})
        return parsed_points_set

    def average_numeric_across_instances(
        self,
        data_points,
        label_key="label_values",
        data_key="point_data",
        point_type="double_value"
    ):
        """
        *DEPRECATED*
        Returns the average of a MQL result set containing numerical data points.
        - ``data_points`` the results from an MQL statement parsed to have singular data points per instance
        Examples:
        | RW.GCP.OpsSuite.Average Numeric Across Instances | ${parsed_points}
        Return Value:
        | average float |
        """
        avg = sum([d[data_key]["values"][0][point_type] for d in data_points]) / len(data_points)
        return avg

    def highest_numeric_across_instances(
        self,
        data_points,
        label_key="label_values",
        data_key="point_data",
        point_type="double_value"
    ):
        """
        *DEPRECATED*
        Returns the highest value from a MQL result set.
        - ``data_points`` the results from an MQL statement parsed to have singular data points per instance
        Examples:
        | RW.GCP.OpsSuite.Highest Numeric Across Instances | ${parsed_points}
        Return Value:
        | highest numeric |
        """
        values = [d[data_key]["values"][0][point_type] for d in data_points]
        highest = max(values)
        return highest

    def sum_numeric_across_instances(
        self,
        data_points,
        label_key="label_values",
        data_key="point_data",
        point_type="double_value"
    ):
        """
        *DEPRECATED*
        Returns the sum of values from a MQL result set.
        - ``data_points`` the results from an MQL statement parsed to have singular data points per instance
        Examples:
        | RW.GCP.OpsSuite.Sum Numeric Across Instances | ${parsed_points}
        Return Value:
        | numeric sum |
        """
        if point_type == "double_value":
            values = [float(d[data_key]["values"][0][point_type]) for d in data_points]
        elif point_type == "int64_value":
            values = [int(d[data_key]["values"][0][point_type]) for d in data_points]
        else:
            # Previously an unknown point_type fell through to a NameError;
            # raise something actionable instead.
            raise ValueError(f"unsupported point_type: {point_type}")
        return sum(values)

    def remove_units(
        self,
        data_points,
        label_key="label_values",
        data_key="point_data",
        point_type="double_value"
    ):
        """
        *DEPRECATED*
        Iterates over a MQL result set and removes alpha characters to allow math ops.
        - ``data_points`` the results from an MQL statement parsed to have singular data points per instance
        Examples:
        | RW.GCP.OpsSuite.Remove Units | ${parsed_points}
        Return Value:
        | MQL result set with numerical data points |
        """
        cleaned = []
        for d in data_points:
            data = d[data_key]["values"][0][point_type]
            data = float(''.join(i for i in data if i.isdigit() or i in ['.', '-']))
            d[data_key]["values"][0][point_type] = data
            cleaned.append(d)
        return cleaned

    def get_gce_logs(
        self, project_name: str = None, log_filter: str = None, limit: int = 1000, gcp_credentials: platform.Secret = None, logger_name: str = "stderr"
    ) -> object:
        """
        Get logs from GCE logging based on filter.
        Note: because we're forgoing the use of the generator to provide an easy interface,
        using a limit and filter is important for performance.

        :return: json string containing the log payloads, newest first
        """
        if gcp_credentials:
            self.authenticate(gcp_credentials)
        logging_client = logging.Client(credentials=self.get_credentials())
        logger = logging_client.logger(f"{logger_name}")
        logs = []
        for log in logger.list_entries(
            resource_names=[f"projects/{project_name}"],
            filter_=log_filter,
            max_results=limit,
            order_by="timestamp desc",
        ):
            logs.append(log.payload)
        return json.dumps(logs)

    def get_logs_dashboard_url(self, project_id: str, gcloud_filter: str, hostname: str = "https://console.cloud.google.com/logs/query") -> str:
        """
        Generates an encoded URL to a gcloud logging dashboard with the equivalent query used to detect reported errors.
        BUG: using '>=' or similar operators in the query string can break the url when the request hits the dashboard; eg: use '>' instead

        :return: str
        """
        params = {"project": project_id}
        # quote filter separately since it uses a different server separator symbol
        encoded_filter = ";query=" + urllib.parse.quote(gcloud_filter)
        url = hostname + encoded_filter + "?" + urllib.parse.urlencode(params, quote_via=urllib.parse.quote)
        return url

    def add_time_range(self, base_query, within_time: str = "1h") -> str:
        """
        Append an `AND timestamp > ... AND timestamp < now` clause covering the
        last ``within_time`` (a timedelta string such as "1h") to ``base_query``.
        """
        past_time = (datetime.now(timezone.utc) - parse_timedelta(within_time)).strftime("%Y-%m-%dT%H:%M:%SZ")
        now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        time_range = f" AND timestamp > \"{past_time}\" AND timestamp < \"{now}\""
        time_ranged_query = base_query + time_range
        return time_ranged_query
import uuid
from typing import List, Optional
from runzero.client import Client
from runzero.types import Site, SiteOptions
__all__ = [
"SiteOptions",
"Sites",
]
class Sites:
    """Read and write access to runZero sites.

    A site is the container that assets, tasks, and other related objects
    live in or are associated with.

    :param client: A handle to the :class:`runzero.Client` client which manages
        interactions with the runZero server.
    """

    _ENDPOINT = "api/v1.0/org/sites"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self, org_id: uuid.UUID) -> List[Site]:
        """
        Retrieves every runZero Site visible within the given organization.

        :param org_id: The ID of the organization to operate against
        :returns: a list of all Sites available within the given organization
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute("GET", self._ENDPOINT, params={"_oid": org_id})
        return [Site.parse_obj(entry) for entry in resp.json_obj]

    def get(self, org_id: uuid.UUID, name: Optional[str] = None, site_id: Optional[uuid.UUID] = None) -> Optional[Site]:
        """
        Looks up a single runZero Site by id or by exact name.

        :param org_id: The ID of the organization to operate against
        :param name: Optional name of the site to retrieve. If not provided, must provide site_id.
        :param site_id: Optional id of the site to retrieve. If not provided, must provide name.
        :returns: the requested Site, or None
        :raises: AuthError, ClientError, ServerError,
            ValueError if neither site_id nor name are provided.
        """
        if site_id is None and name is None:
            raise ValueError("must provide site_id or site name")
        if site_id is not None:
            resp = self._client.execute("GET", f"{self._ENDPOINT}/{site_id}", params={"_oid": org_id})
            if not resp:
                return None
            payload = resp.json_obj
            # Some responses wrap the site under a "data" envelope; unwrap it.
            wrapped = payload.get("data", "")
            return Site.parse_obj(wrapped if wrapped else payload)
        # Name lookup: linear scan over all sites in the org.
        return next((site for site in self.get_all(org_id) if site.name == name), None)

    def create(self, org_id: uuid.UUID, site_options: SiteOptions) -> Optional[Site]:
        """
        Creates a new site in the given org.

        :param org_id: The ID of the organization to operate against
        :param site_options: Description of site to create
        :returns: the Site that was created, or None
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute("PUT", self._ENDPOINT, params={"_oid": org_id}, data=site_options)
        # A "data" envelope means the server returned a summary; re-fetch the
        # full site record by its name.
        if resp.json_obj.get("data", ""):
            return self.get(org_id=org_id, name=site_options.name)
        return Site.parse_obj(resp.json_obj)

    def update(self, org_id: uuid.UUID, site_id: uuid.UUID, site_options: SiteOptions) -> Optional[Site]:
        """
        Updates a site associated with your organization.

        :param org_id: The ID of the organization to operate against
        :param site_id: The ID of the site to update.
        :param site_options: Site's updated values
        :returns: the updated Site, or None
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute("PATCH", f"{self._ENDPOINT}/{site_id}", params={"_oid": org_id}, data=site_options)
        if resp.json_obj.get("data", ""):
            return self.get(org_id=org_id, name=site_options.name)
        return Site.parse_obj(resp.json_obj)

    def delete(self, org_id: uuid.UUID, site_id: uuid.UUID) -> None:
        """
        Deletes a site from your account.

        :param org_id: The ID of the organization to operate against
        :param site_id: Custom asset site id to delete
        :returns: None
        :raises: AuthError, ClientError, ServerError
        """
        self._client.execute("DELETE", f"{self._ENDPOINT}/{site_id}", params={"_oid": org_id})
import uuid
from typing import List, Optional
from runzero.client import Client
from runzero.types import Explorer, ExplorerSiteID
__all__ = [
"Explorers",
]
class Explorers:
    """Management of runZero Explorers.

    Explorers are deployed to a machine and are assigned to :class:`runzero.api.Sites` where
    they perform Scans defined by :class:`runzero.api.Tasks`.

    Explorers are similar to :class:`runzero.api.HostedZones` in that both execute tasks.
    Whereas HostedZones are provided by runZero and do the work of Explorers, an Explorer is a single
    deployable service which can scan networks which are not publicly accessible.

    :param client: A handle to the :class:`runzero.Client` client which manages interactions
        with the runZero server.
    """

    _ENDPOINT = "api/v1.0/org/explorers"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self, org_id: uuid.UUID) -> List[Explorer]:
        """
        Retrieves all active runZero Explorers available within the given Organization.

        :param org_id: The ID of the organization to operate against
        :returns: a list of all Explorers available within the given Organization
        :raises: AuthError, ClientError, ServerError
        """
        params = {"_oid": org_id}
        res = self._client.execute("GET", self._ENDPOINT, params=params)
        result: List[Explorer] = []
        for explorer in res.json_obj:
            result.append(Explorer.parse_obj(explorer))
        return result

    def get(
        self, org_id: uuid.UUID, name: Optional[str] = None, explorer_id: Optional[uuid.UUID] = None
    ) -> Optional[Explorer]:
        """
        Retrieves the runZero Explorer with the provided name or id, if it is active and exists in the Organization.

        :param org_id: The ID of the organization to operate against
        :param name: Optional name of the explorer to retrieve. This is a case-insensitive hostname match.
            If not provided, must provide explorer_id.
        :param explorer_id: Optional id of the explorer to retrieve. If not provided, must provide name.
        :returns: explorer requested or None
        :raises: AuthError, ClientError, ServerError,
            ValueError if neither explorer_id nor name are provided.
        """
        params = {"_oid": org_id}
        if name is None and explorer_id is None:
            raise ValueError("must provide explorer_id or explorer name")
        if explorer_id is not None:
            res = self._client.execute("GET", f"{self._ENDPOINT}/{explorer_id}", params=params)
            if not res:
                return None
            return Explorer.parse_obj(res.json_obj)
        # Bug fix: the docstring promises a case-insensitive hostname match but
        # the comparison was case-sensitive; hostnames compare caselessly.
        wanted = name.casefold()
        for explorer in self.get_all(org_id):
            if explorer.name is not None and explorer.name.casefold() == wanted:
                return explorer
        return None

    def update_to_latest_version(self, org_id: uuid.UUID, explorer_id: uuid.UUID) -> None:
        """
        Updates an explorer with given explorer id to the latest explorer software version available.

        This will force the explorer to upgrade and restart.

        :param org_id: The ID of the organization to operate against
        :param explorer_id: The ID of the explorer to update
        :returns: None
        :raises: AuthError, ClientError, ServerError
        """
        params = {"_oid": org_id}
        self._client.execute("POST", f"{self._ENDPOINT}/{explorer_id}/update", params=params)

    def delete(self, org_id: uuid.UUID, explorer_id: uuid.UUID) -> None:
        """
        Removes and uninstalls an explorer from your Organization.

        :param org_id: The ID of the organization to operate against
        :param explorer_id: ID of explorer to delete
        :returns: None
        :raises: AuthError, ClientError, ServerError
        """
        params = {"_oid": org_id}
        self._client.execute("DELETE", f"{self._ENDPOINT}/{explorer_id}", params=params)

    def move_to_site(self, org_id: uuid.UUID, explorer_id: uuid.UUID, site_id: uuid.UUID) -> Explorer:
        """
        Moves an explorer to a different site.

        Explorers moved to a new site will no longer execute tasks defined in the old site,
        and will be available to execute tasks defined in the new site.

        :param org_id: The ID of the organization to operate against
        :param explorer_id: ID of explorer to assign to a new site
        :param site_id: ID of the site the explorer will be assigned to
        :returns: The Explorer with the provided ID, assigned to new site site_id
        :raises: AuthError, ClientError, ServerError
        """
        params = {"_oid": org_id}
        res = self._client.execute(
            "PATCH",
            f"{self._ENDPOINT}/{explorer_id}",
            params=params,
            data=ExplorerSiteID(site_id=site_id),
        )
        return Explorer.parse_obj(res.json_obj)
import uuid
from typing import Dict, List, Optional, Union
from runzero.client import Client
from runzero.types import Task, TaskOptions
class Tasks:
    """Management of runZero tasks.

    :param client: A handle to the :class:`runzero.Client` client which manages
        interactions with the runZero server.
    """

    _ENDPOINT = "api/v1.0/org/tasks"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self, org_id: uuid.UUID, status: Optional[str] = None, query: Optional[str] = None) -> List[Task]:
        """
        Returns every runZero Task in the given Organization, optionally narrowed
        by status and/or a search query.

        :param org_id: The unique ID of the organization to retrieve the tasks from.
        :param status: An optional status value to filter tasks by. This is a
            case-insensitive string match, stripped of surrounding whitespace.
        :param query: An optional query to filter returned tasks.
            Query string format is the same as in-UI search. See https://www.runzero.com/docs/search-query-tasks/
        :returns: A list of all tasks
        """
        params: Dict[str, Union[str, uuid.UUID]] = {"_oid": org_id}
        if query is not None:
            params["search"] = query.strip()
        if status is not None:
            params["status"] = status.strip()
        resp = self._client.execute("GET", self._ENDPOINT, params=params)
        return [Task.parse_obj(entry) for entry in resp.json_obj]

    def get(self, org_id: uuid.UUID, name: Optional[str] = None, task_id: Optional[uuid.UUID] = None) -> Optional[Task]:
        """
        Fetches a single runZero Task by id or by exact name, if it exists in
        your organization.

        :param org_id: ID of the organization the requested task is in
        :param name: Optional name of the task to retrieve. If not provided, must provide task_id.
        :param task_id: Optional id of the task to retrieve. If not provided, must provide name.
        :raises: AuthError, ClientError, ServerError
            ValueError if neither task_id nor name are provided.
        """
        if task_id is None and name is None:
            raise ValueError("must provide either task_id or task name")
        if task_id is not None:
            resp = self._client.execute("GET", f"{self._ENDPOINT}/{task_id}", params={"_oid": org_id})
            return Task.parse_obj(resp.json_obj)
        # Name lookup: linear scan over all tasks in the org.
        return next((task for task in self.get_all(org_id) if task.name == name), None)

    def get_status(self, org_id: uuid.UUID, task_id: uuid.UUID) -> Optional[str]:
        """
        Returns the status string of the Task with the provided id, if it exists
        in your organization. The org_id should be provided if using an Account
        level api key.

        :param org_id: ID of the organization the requested task is in
        :param task_id: ID of the task you want the status for
        :returns: a string result indicating task status, or None
        """
        resp = self._client.execute("GET", f"{self._ENDPOINT}/{task_id}", params={"_oid": org_id})
        task = Task.parse_obj(resp.json_obj)
        return None if task is None else task.status

    def update(self, org_id: uuid.UUID, task_id: uuid.UUID, task_options: TaskOptions) -> Task:
        """
        Applies the provided task option values to an existing task.

        :param org_id: ID of the organization the requested task is in
        :param task_id: ID of task to modify
        :param task_options: task values to update
        :returns: Task which has been updated
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute(
            "PATCH", f"{self._ENDPOINT}/{task_id}", data=task_options, params={"_oid": org_id}
        )
        return Task.parse_obj(resp.json_obj)

    def stop(self, org_id: uuid.UUID, task_id: uuid.UUID) -> Task:
        """
        Signals an explorer to stop a currently running task, or signals server to remove
        a future or recurring task.

        :param org_id: ID of the organization the requested task is in
        :param task_id: ID of task to stop, or scheduled task to remove from schedule.
        :returns: Task which has been signalled to stop
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute("POST", f"{self._ENDPOINT}/{task_id}/stop", params={"_oid": org_id})
        return Task.parse_obj(resp.json_obj)

    def hide(self, org_id: uuid.UUID, task_id: uuid.UUID) -> Task:
        """
        Signal that a completed task should be hidden.

        :param org_id: ID of the organization the requested task is in
        :param task_id: task to modify
        :returns: Completed task which has been hidden
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute("POST", f"{self._ENDPOINT}/{task_id}/hide", params={"_oid": org_id})
        return Task.parse_obj(resp.json_obj)
import base64
import uuid
from typing import Any, List, Optional
from runzero.client import Client
from runzero.types import CustomIntegration
class CustomIntegrations:
    """Read access to custom integrations.

    This is a subset of operations available in runzero.admin.custom_integrations.CustomIntegrationsAdmin
    which allows 'write' operations for custom integrations.

    :param client: A handle to the :class:`runzero.Client` which manages interactions
        with the runZero server.
    """

    _ENDPOINT = "api/v1.0/org/custom-integrations"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self, org_id: uuid.UUID) -> List[CustomIntegration]:
        """
        Lists all custom integrations available to your account.

        :param org_id: The ID of the organization to operate against
        :returns: List of custom integrations
        :raises: AuthError, ClientError, ServerError
        """
        resp = self._client.execute("GET", self._ENDPOINT, params={"_oid": org_id})
        return [_resp_to_source(entry) for entry in resp.json_obj]

    def get(
        self, org_id: uuid.UUID, name: Optional[str] = None, custom_integration_id: Optional[uuid.UUID] = None
    ) -> Optional[CustomIntegration]:
        """
        Retrieves the runZero custom integration with the matching ID or Name.

        :param org_id: The ID of the organization to operate against
        :param name: Optional, name of the organization you want the UUID for
        :param custom_integration_id: Optional, the id of the source to retrieve
        :raises: AuthError, ClientError, ServerError
            ValueError if neither custom_integration_id nor name are provided.
        :returns: The matching CustomIntegration or None
        """
        if custom_integration_id is None and name is None:
            raise ValueError("must provide custom_integration_id or source name")
        if custom_integration_id is not None:
            resp = self._client.execute(
                "GET", f"{self._ENDPOINT}/{custom_integration_id}", params={"_oid": org_id}
            )
            return _resp_to_source(resp.json_obj)
        # Name lookup: linear scan over all integrations in the org.
        return next((src for src in self.get_all(org_id) if src.name == name), None)
def _resp_to_source(json_obj: Any) -> CustomIntegration:
    """Parse a raw API response object into a CustomIntegration, decoding the
    base64-encoded icon bytes when one is present."""
    integration = CustomIntegration.parse_obj(json_obj)
    icon = integration.icon
    if icon is not None:
        integration.icon = base64.b64decode(icon)
    return integration
import uuid
from typing import List, Optional
from runzero.client import Client
from runzero.types import HostedZone
__all__ = [
"HostedZones",
]
class HostedZones:
    """Management of runZero hosted zones.

    A hosted zone is a pool of cloud-hosted :class:`runzero.api.Explorers`
    available to Enterprise customers.

    Instead of specifying a single manually deployed explorer, a hosted zone may be specified
    when working with :class:`runzero.api.Tasks` or :class:`runzero.api.Templates`. Hosted zones
    can only reach public IP space.

    :param client: A handle to the :class:`runzero.Client` client which manages interactions
        with the runZero server.
    """

    _ENDPOINT = "api/v1.0/org/hosted-zones"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self, org_id: uuid.UUID) -> List[HostedZone]:
        """
        Retrieves all active runZero hosted zones available within the given organization.

        :param org_id: The ID of the organization to operate against
        :returns: list of HostedZones
        :raises: AuthError, ClientError, ServerError
        """
        params = {"_oid": org_id}
        res = self._client.execute("GET", self._ENDPOINT, params=params)
        result: List[HostedZone] = []
        for hosted_zone in res.json_obj:
            result.append(HostedZone.parse_obj(hosted_zone))
        return result

    def get(
        self, org_id: uuid.UUID, name: Optional[str] = None, hosted_zone_id: Optional[uuid.UUID] = None
    ) -> Optional[HostedZone]:
        """
        Retrieves the runZero hosted zone with the provided name or id, if it is active and exists in the Organization.

        :param org_id: The ID of the organization to operate against
        :param name: Optional name of the hosted zone to retrieve. This is a case-insensitive match.
            If not provided, must provide hosted_zone_id.
        :param hosted_zone_id: Optional id of the hosted zone to retrieve. If not provided, must provide name.
        :returns: HostedZone requested or None
        :raises: AuthError, ClientError, ServerError,
            ValueError if neither hosted_zone_id nor name are provided.
        """
        params = {"_oid": org_id}
        if name is None and hosted_zone_id is None:
            raise ValueError("must provide hosted_zone_id or hosted zone name")
        if hosted_zone_id is not None:
            res = self._client.execute("GET", f"{self._ENDPOINT}/{hosted_zone_id}", params=params)
            if not res:
                return None
            return HostedZone.parse_obj(res.json_obj)
        # Bug fix: the docstring promises a case-insensitive match but the
        # comparison was case-sensitive `==`; compare caselessly instead.
        wanted = name.casefold()
        for hosted_zone in self.get_all(org_id):
            if hosted_zone.name is not None and hosted_zone.name.casefold() == wanted:
                return hosted_zone
        return None
import gzip
import tempfile
import time
import uuid
from typing import Iterable, List, Optional
from runzero.client import Client
from runzero.types import ImportAsset, ImportTask, NewAssetImport, Task
class CustomAssets:
    """Upload interface for asset data produced by your own custom integrations.

    A custom integration is a descriptive registered association between a data
    producer and the assets imported on its behalf. Uploaded data receives basic
    checks on the server and is processed as an import task once a worker is
    available, so the result of an upload is a :class:`runzero.Task` whose
    status can be polled.

    See the related :class:`runzero.account.CustomIntegrations` to work with the
    custom asset data integrations registered to the account.

    :param client: A handle to the :class:`runzero.Client` which manages
        interactions with the runZero server.
    """

    _ENDPOINT = "api/v1.0/import/org/{oid}/assets"

    def __init__(self, client: Client):
        """Bind this uploader to the given runZero client."""
        self._client = client

    def upload_assets(
        self,
        org_id: uuid.UUID,
        site_id: uuid.UUID,
        custom_integration_id: uuid.UUID,
        assets: List[ImportAsset],
        task_info: Optional[ImportTask] = None,
    ) -> Task:
        """
        Upload your custom assets to the runZero platform.

        See the ImportAsset object for a description of the data that can be
        imported. Assets are merged according to the merge logic in the release
        of the platform; this involves fields other than the custom_properties
        dictionary. When the runZero asset ID is known externally, it may be
        specified on any single ImportAsset to override all merge rules and
        force that object's data onto the runZero asset with that ID.

        :param org_id: Organization ID to import these assets into
        :param site_id: ID of the Site to import these assets into
        :param custom_integration_id: custom integration id for the provided Import Assets
        :param assets: A collection of ImportAssets to upload
        :param task_info: Descriptive information associated with the import
            task to be created. If omitted, a task name is generated for you
        :returns: Task: The runZero task associated with processing the asset upload
        :raises: ServerError, ClientError, AuthError
        """
        if task_info is None:
            task_info = ImportTask(name=f"Custom Asset Import {time.time_ns():.0f}", description="py-sdk import")
        else:
            # Backfill any fields the caller left empty on their ImportTask.
            if task_info.name == "":
                task_info.name = f"Custom Asset Import {time.time_ns():.0f}"
            if task_info.description is None or task_info.description == "":
                task_info.description = "py-sdk import"
        request = _create_custom_asset_request(
            site_id=site_id,
            custom_integration_id=custom_integration_id,
            import_task=task_info,
            assets=assets,
        )
        tags = request.import_task.tags
        tags_as_str = ",".join(tag.__root__ for tag in tags) if tags is not None else ""
        # Field names and ordering follow the server's multipart upload contract.
        multipart_form_data = (
            ("assetData", ("asset_data.jsonl.gz", request.asset_data)),
            ("siteId", (None, str(request.site_id))),
            ("customIntegrationId", (None, str(request.custom_integration_id))),
            ("importTask.name", (None, request.import_task.name)),
            ("importTask.description", (None, request.import_task.description)),
            ("importTask.tags", (None, tags_as_str)),
        )
        res = self._client.execute("POST", self._ENDPOINT.format(oid=org_id), files=multipart_form_data, multipart=True)
        return Task.parse_obj(res.json_obj)
def _import_assets_into_gzip_jsonl(import_assets: Iterable[ImportAsset]) -> bytes:
    """Serialize assets into gzip-compressed JSONL bytes.

    Each asset is rendered as a single JSON line (using field aliases, as the
    server expects) and the whole stream is gzip-compressed.

    :param import_assets: assets to serialize
    :returns: gzip-compressed JSONL payload as bytes
    """
    # Use the temp file as a context manager so the handle (and the backing
    # file) is released deterministically; the original left it for the GC.
    with tempfile.TemporaryFile(mode="w+b") as tmp:
        with gzip.GzipFile(fileobj=tmp, mode="wb") as gzw:
            for asset_obj in import_assets:
                gzw.write(asset_obj.json(by_alias=True).encode("utf-8") + b"\n")
        # GzipFile must be closed before reading so the gzip trailer is flushed.
        tmp.seek(0)
        return tmp.read()
def _create_custom_asset_request(
    site_id: uuid.UUID, custom_integration_id: uuid.UUID, assets: Iterable[ImportAsset], import_task: ImportTask
) -> NewAssetImport:
    """Bundle the parts of an asset upload into a NewAssetImport payload."""
    compressed = _import_assets_into_gzip_jsonl(assets)
    return NewAssetImport(
        site_id=site_id,
        custom_integration_id=custom_integration_id,
        import_task=import_task,
        asset_data=compressed,
    )
import uuid
from typing import List, Optional
from runzero.client import Client
from runzero.types import ScanTemplate, ScanTemplateOptions, Task
# pylint: disable=duplicate-code ## Acknowledged that this very similar to the org-level tasks interface
class TasksAdmin:
    """Account level management of runZero tasks in all organizations.

    :param client: A handle to the :class:`runzero.Client` client which manages
        interactions with the runZero server.
    """

    _ENDPOINT = "api/v1.0/account/tasks"

    def __init__(self, client: Client):
        """Bind this admin API wrapper to the given runZero client."""
        self._client = client

    def get_all(self, status: Optional[str] = None, query: Optional[str] = None) -> List[Task]:
        """
        Retrieves up to 1000 runZero Tasks available within all organizations in the account.

        :param query: An optional query to filter returned tasks.
            Query string format is the same as in-UI search. See https://www.runzero.com/docs/search-query-tasks/
        :param status: An optional status value to filter tasks by. This is a
            case-insensitive string match, stripped of surrounding whitespace.
        :returns: A list of all tasks, or tasks which match the provided query string
        """
        # Only forward the filters the caller actually supplied, stripped of
        # surrounding whitespace.
        params = {
            key: value.strip()
            for key, value in (("search", query), ("status", status))
            if value is not None
        }
        res = self._client.execute("GET", self._ENDPOINT, params=params)
        return [Task.parse_obj(obj) for obj in res.json_obj]
class TemplatesAdmin:
    """Account level management of runZero scan templates in all organizations.

    :param client: A handle to the :class:`runzero.Client` client which manages
        interactions with the runZero server.
    """

    _ENDPOINT = "api/v1.0/account/tasks/templates"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self, query: Optional[str] = None) -> List[ScanTemplate]:
        """
        Retrieves up to 1000 runZero task scan templates available to all organizations in the account.

        :param query: An optional query to filter returned templates.
            Query string format is the same as in-UI search. See https://www.runzero.com/docs/search-query-tasks/
        :returns: A list of all task scan templates
        :raises: AuthError, ClientError, ServerError
        """
        params = {}
        if query is not None:
            params["search"] = query.strip()
        res = self._client.execute("GET", self._ENDPOINT, params=params)
        return [ScanTemplate.parse_obj(obj) for obj in res.json_obj]

    def get(self, name: Optional[str] = None, scan_template_id: Optional[uuid.UUID] = None) -> Optional[ScanTemplate]:
        """
        Retrieves the scan template with the provided name or id, if it exists in your account.

        :param name: Optional, name of the scan template to retrieve
        :param scan_template_id: Optional, the id of the scan template to retrieve
        :returns: ScanTemplate created or None
        :raises: AuthError, ClientError, ServerError
            ValueError if neither scan_template_id nor name are provided.
        """
        if name is None and scan_template_id is None:
            raise ValueError("must provide scan_template_id or scan template name")
        if scan_template_id is not None:
            res = self._client.execute("GET", f"{self._ENDPOINT}/{scan_template_id}")
            return ScanTemplate.parse_obj(res.json_obj)
        # fall back to a linear scan by template name
        for scan_template in self.get_all():
            if scan_template.name == name:
                return scan_template
        return None

    def create(self, scan_template_options: ScanTemplateOptions) -> Optional[ScanTemplate]:
        """
        Creates a new scan template in your account.

        :param scan_template_options: Description of scan template to create
        :returns: ScanTemplate created or None
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("POST", self._ENDPOINT, data=scan_template_options)
        return ScanTemplate.parse_obj(res.json_obj)

    def update(
        self,
        new_scan_template_values: ScanTemplate,
    ) -> Optional[ScanTemplate]:
        """
        Updates an existing scan template in your account by replacing all values.

        The 'id' field of the ScanTemplate must match an existing scan template, which
        will be changed to the new ScanTemplate.

        :param new_scan_template_values: Values to update the target scan template with
        :returns: ScanTemplate updated with new values or None
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("PUT", self._ENDPOINT, data=new_scan_template_values)
        return ScanTemplate.parse_obj(res.json_obj)

    def delete(self, scan_template_id: uuid.UUID) -> None:
        """
        Deletes a scan template with provided ID from your account.

        :param scan_template_id: The ID of the scan template to delete
        :returns: None
        :raises: AuthError, ClientError, ServerError
        """
        self._client.execute("DELETE", f"{self._ENDPOINT}/{scan_template_id}")
import base64
import pathlib
import uuid
from pathlib import Path
from typing import Any, List, Optional, Union
from runzero.client import Client
from runzero.errors import Error
from runzero.types import BaseCustomIntegration, CustomIntegration, NewCustomIntegration
from ._sdk_source_icon import _PY_ICON_BYTES
class CustomIntegrationsAdmin:
    """Full Management of custom integrations.

    Custom integrations are descriptive registered associations between integrations of data and
    assets imported which are associated with those integrations.

    This is a superset of operations available in runzero.custom_integrations.CustomIntegrations
    which allows only read operations.

    :param client: A handle to the :class:`runzero.Client` which manages interactions
        with the runZero server.
    """

    _ENDPOINT = "api/v1.0/account/custom-integrations"

    PYTHON_ICON = _PY_ICON_BYTES
    """A default icon representing a custom integration defined via this Python SDK."""

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self) -> List[CustomIntegration]:
        """
        Lists all custom integrations available to your account.

        :returns: List of custom integrations
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("GET", self._ENDPOINT)
        return [_resp_to_source(src) for src in res.json_obj]

    def get(
        self, name: Optional[str] = None, custom_integration_id: Optional[uuid.UUID] = None
    ) -> Optional[CustomIntegration]:
        """
        Retrieves runZero custom integrations with either the matching ID or Name.

        :param name: Optional, name of the custom integration to retrieve
        :param custom_integration_id: Optional, the id of the custom integration to retrieve
        :returns: The matching CustomIntegration or None
        :raises: AuthError, ClientError, ServerError
            ValueError if neither custom_integration_id nor name are provided.
        """
        if name is None and custom_integration_id is None:
            raise ValueError("must provide custom_integration_id or source name")
        if custom_integration_id is not None:
            res = self._client.execute("GET", f"{self._ENDPOINT}/{custom_integration_id}")
            return _resp_to_source(res.json_obj)
        # fall back to a linear scan by name
        for src in self.get_all():
            if src.name == name:
                return src
        return None

    def create(
        self,
        name: str,
        description: Optional[str] = None,
        icon: Optional[Union[bytes, bytearray, memoryview, Path, str]] = PYTHON_ICON,
    ) -> CustomIntegration:
        """
        Creates a new custom integration.

        :param name: Name of custom integration to be created in to your account. The
            name may not contain spaces, tabs, or other whitespace
        :param description: Optional description of custom integration to be created
        :param icon: Optional file path to, or bytes of icon data. The icon must be
            a png formatted image with a maximum size of 32x32. Icon format
            is validated by the server. The default value assigns your custom
            data source the Python logo to indicate it was created by this SDK.
            Use None to have the server choose the default custom integration logo,
            a grey runZero logo
        :returns: CustomIntegration created
        :raises: AuthError, ClientError, ServerError
        """
        if isinstance(icon, (Path, str)):
            try:
                # pathlib handles the open/read/close cycle in one call
                icon = pathlib.Path(icon).resolve().read_bytes()
            except OSError as exc:  # IOError has been an alias of OSError since Python 3.3
                raise Error from exc
        if isinstance(icon, (bytes, bytearray, memoryview)):
            # the server expects base64-encoded image data
            icon = base64.b64encode(icon).decode("utf-8")
        req = NewCustomIntegration(name=name, description=description, icon=icon)
        res = self._client.execute("POST", self._ENDPOINT, data=req)
        return _resp_to_source(res.json_obj)

    def update(self, custom_integration_id: uuid.UUID, source_options: BaseCustomIntegration) -> CustomIntegration:
        """
        Updates a custom integration associated with your account.

        :param custom_integration_id: id of the custom integration to update
        :param source_options: custom integration request values to update
        :returns: CustomIntegration updated
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("PATCH", f"{self._ENDPOINT}/{custom_integration_id}", data=source_options)
        return _resp_to_source(res.json_obj)

    def delete(self, custom_integration_id: uuid.UUID) -> CustomIntegration:
        """
        Deletes a custom integration from your account.

        :param custom_integration_id: custom integration id to delete
        :returns: CustomIntegration deleted
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("DELETE", f"{self._ENDPOINT}/{custom_integration_id}")
        return _resp_to_source(res.json_obj)
def _resp_to_source(json_obj: Any) -> CustomIntegration:
    """Parse a server response object into a CustomIntegration, decoding its icon."""
    integration = CustomIntegration.parse_obj(json_obj)
    if integration.icon is None:
        return integration
    # the server returns the icon base64-encoded; hand callers raw bytes
    integration.icon = base64.b64decode(integration.icon)
    return integration
import uuid
from typing import List, Optional
from runzero.client import Client
from runzero.types import Organization, OrgOptions
class OrgsAdmin:
    """Management of runZero organizations.

    Organizations are an administrative boundary for various platform-level objects and methods.

    :param client: A handle to the :class:`runzero.Client` client which manages interactions
        with the runZero server.
    """

    _ENDPOINT = "api/v1.0/account/orgs"

    def __init__(self, client: Client):
        """Constructor method"""
        self._client = client

    def get_all(self) -> List[Organization]:
        """
        Retrieves all runZero Organizations available to your account

        :returns: A list of all Organizations available to your account
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("GET", self._ENDPOINT)
        return [Organization.parse_obj(org) for org in res.json_obj]

    def get(self, org_id: Optional[uuid.UUID] = None, name: Optional[str] = None) -> Optional[Organization]:
        """
        Retrieves the runZero Organization with the provided name or id, if it exists in your account.

        :param org_id: Optional id of the organization to retrieve
        :param name: Optional name of the organization to retrieve
        :returns: Organization if found, or None
        :raises: AuthError, ClientError, ServerError,
            ValueError if neither org_id nor name are provided.
        """
        if name is None and org_id is None:
            raise ValueError("must provide org_id or organization name")
        if org_id is not None:
            res = self._client.execute("GET", f"{self._ENDPOINT}/{org_id}")
            return Organization.parse_obj(res.json_obj)
        # fall back to a linear scan by name
        for org in self.get_all():
            if org.name == name:
                return org
        return None

    def create(self, org_options: OrgOptions) -> Optional[Organization]:
        """
        Creates a new organization in your account.

        :param org_options: Description of organization to create
        :returns: Organization created or None
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("PUT", self._ENDPOINT, data=org_options)
        obj = res.json_obj
        # some responses wrap the organization in a 'data' envelope; unwrap when present
        obj = obj.get("data", "") or obj
        return Organization.parse_obj(obj)

    def update(self, org_id: uuid.UUID, org_options: OrgOptions) -> Optional[Organization]:
        """
        Updates an organization associated with your account.

        :param org_id: The ID of the organization to patch
        :param org_options: Organization's updated values
        :returns: Organization updated or None
        :raises: AuthError, ClientError, ServerError
        """
        res = self._client.execute("PATCH", f"{self._ENDPOINT}/{org_id}", data=org_options)
        return Organization.parse_obj(res.json_obj)

    def delete(self, org_id: uuid.UUID) -> None:
        """
        Deletes an organization with provided ID from your account.

        :param org_id: The ID of the organization to operate against
        :returns: None
        :raises: AuthError, ClientError, ServerError
        """
        self._client.execute("DELETE", f"{self._ENDPOINT}/{org_id}")
from __future__ import annotations
from datetime import datetime
from enum import Enum
from ipaddress import IPv4Address, IPv6Address
from typing import Any, Dict, List, Optional
from uuid import UUID
from pydantic import BaseModel, Field
class BaseResponse(BaseModel):
    """
    Minimal identifying information with lifecycle metadata
    """

    class Config:
        # pydantic v1 setting: accept the pythonic field name in addition to the camelCase alias
        allow_population_by_field_name = True

    id: UUID = Field(..., example="f6cfb91a-52ea-4a86-bf9a-5a891a26f52b")
    """
    The unique ID of the object
    """
    client_id: UUID = Field(..., alias="clientId", example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The unique ID of the runZero client/customer account that owns the object
    """
    created_by_id: UUID = Field(..., alias="createdById", example="f6cfb91a-52ea-4a86-bf9a-5a891a26f52b")
    """
    The unique ID of the entity that created the object
    """
    created_at: datetime = Field(..., alias="createdAt", example="2023-03-06T18:14:50.52Z")
    """
    A timestamp indicating creation time of the object
    """
    updated_at: datetime = Field(..., alias="updatedAt", example="2023-03-06T18:14:50.52Z")
    """
    A timestamp indicating last modified time of the object
    """
    destroyed_at: Optional[datetime] = Field(None, alias="destroyedAt", example="2023-03-06T18:14:50.52Z")
    """
    A timestamp indicating deletion time of the object
    """
class BaseCustomIntegration(BaseModel):
    """
    Optional fields shared by custom-integration create/update requests and responses.
    """

    class Config:
        allow_population_by_field_name = True

    name: Optional[str] = Field(None, example="my-custom-integration", regex="^\\S+$")
    """
    The unique name of the custom integration, without spaces.
    """
    icon: Optional[str] = Field(
        None,
        example="iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAomVYSWZNTQAqAAAACAAFARIAAwAAAAEAAQAAARoABQAAAAEAAABKARsABQAAAAEAAABSASgAAwAAAAEAAgAAh2kABAAAAAEAAABaAAAAAAAAAJAAAAABAAAAkAAAAAEABJKGAAcAAAASAAAAkKABAAMAAAABAAEAAKACAAQAAAABAAAAIKADAAQAAAABAAAAIAAAAABBU0NJSQAAAFNjcmVlbnNob3TIMt7LAAAACXBIWXMAABYlAAAWJQFJUiTwAAADBWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+MTAyPC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6Q29sb3JTcGFjZT4xPC9leGlmOkNvbG9yU3BhY2U+CiAgICAgICAgIDxleGlmOlVzZXJDb21tZW50PlNjcmVlbnNob3Q8L2V4aWY6VXNlckNvbW1lbnQ+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj4xMDI8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8dGlmZjpZUmVzb2x1dGlvbj4xNDQ8L3RpZmY6WVJlc29sdXRpb24+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjE0NDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CtVpwSkAAAVcSURBVFgJxVbZT1xlFP/NdmdhpgwMDBQQEISWNNKCMEJtrTFuTWN88MGqaVNNH/QPMK01NU2MtjbR2GhCNFZDYkwaY4201RcfGlPKYi2pAS0FOmWnUJYCs8+d8Zxv5sqFuUyhIeFk7vbds/zO75zz3dHFSbCBot/A2CK0cb0AMJFaZOr16XPUraYED+pcSY7tdTqd8rjkel8A6Yz5XVSWQaljfn4BCz4fZDkKH11nZu7B7c5FZUUFBWcVbRBpS6AYhSMR3L49gLHxcYzfmUDQ70cg4EdPnxcTk5PiuevmLdy83r4kO344+t77OHbkHTgcdm0QFERTZFkW6719/fEXX97Pk5LmsMVdxVvjVds98eq6nfG6xj3xbTUN8R31u4TNydOfJGLEYimxNEtAWqJmA4ODKC0pEVltrfbAYbcJOskPIsTKxOw8xmYXgLvTpHNP6KlPuaXbYCD6x70zxF4n8vPcKSykLcHZb5uFv9qGJ3GtywssDKn9A85SPFpRhMIn6lBWUoSC/DxYbTa4XC60d/yJpubzeKyqhAB0U4/Mrw6Akv0cGVz47XcY3WXo7BtCZXkhPj35FQo25xMLetgyMpDlzITFbIbBYIRZXBdHzm63o+nzz/BX+6AArVthHFdkIB6LYXxyGjkOG8b7u/D0/pewb+8LSxlQPXGxZbLhBIwGA2q2V6O1tQ2ZTickSUJp8UNCe/m+sCIA1jYZDQhFosKQA0SiUeGcWgk81Tzb3A8Gg148G1RZlj1cCj4UCckx+AIhWExGSORXkbQAYpSRWhg9B2Wi+crZcvAwOZ+YncO0PwCfP4hoOILROR+GqUlDgSCmaG0kGMG50SlceuUZ7KkqE8D1eh3SAlAHF/dMQ1KUXukaHMWJX1vw48hdEBKmiWsBkHMQOEIqri6LiQbFD2ZPLWsDkLRkJ0x3p3cYtWd+AGwSCq0Scq1mGCkewaBAQIgYXCDd4WgMAbb1h2mnXMrqAwHg4KGojBMXL4vgW+xW9ATDGJmmPYG7gX7ULICZspaM2O2wIo904gU5yKamFsI6JGsDQEZMINuOTc/iF6ppuc0sgu/OzMBbz3ngzrTDTRuWRTKJ0bQTK5ssZo4FPZVEaT89l4ZkbQA4uoKAqYzIsHGd5wM48FQNXttVK5yqTzJNDn83MmiDSiatfi0aeslC+gcFAWklvSlOJQZCwh8ticFjCpGeB6I15dR9Y83KcfHC4zcv+xf2j9xhLy/Acr3Y27Y2IBmR4Ecp6LcnJi3OMLwT8Mju4BTUNfvShDt32XG8h1hr/+VLIFvsQh7Sf8pBUSK3DYznJ48hrGkxkj3OYNUkAfDWJo9X4HbWz3jGlFenWO0dppuqGOKqxQZfWvHqtNbHomxUIx11qH5ti9PZdgc7lL+YWh8ViF2e0pkgRvn3zC1s7v1ZJBEnBKW5fH2XGLx6ghuNc7J9e+7wATXGaAPpF+VyAGLyTrOimegGMnKMRhvAZmgNi4nlAR6Pw2LQ6ZfCGXz1Wm+9cqXzobm4XH2k+z5dP6y2/66t8ntiGgeU20VfP3NdnhxVxC4nF8eSLe+zhH3/Mbq8qQr7RFvuC10bw1/VyjkaiWA+Pq5S7bGGPVazYbnsvG1ui4+IncWuMvGVc2XNf0AqEb2qFKGTBUAAAAASUVORK5CYII=",
    )
    """
    Base64 encoded png with maximum size 256x256 pixels
    """
    description: Optional[str] = Field(None, example="My custom integration description.")
    """
    A text description of the custom integration
    """
class CustomIntegration(BaseCustomIntegration, BaseResponse):
    """
    A custom integration as returned by the server: the shared optional fields
    plus lifecycle metadata, with ``name`` made required.
    """

    class Config:
        allow_population_by_field_name = True

    name: str = Field(..., example="my-custom-integration", regex="^\\S+$")
    """
    The unique name of the custom integration, without spaces.
    """
class NewCustomIntegration(BaseCustomIntegration):
    """
    Request body for registering a new custom integration; ``name`` is required.
    """

    class Config:
        allow_population_by_field_name = True

    name: str = Field(..., example="my-custom-integration", regex="^\\S+$")
    """
    The unique name of the custom integration, without spaces.
    """
class Tag(BaseModel):
    """A single tag value, limited to 1024 characters."""

    class Config:
        allow_population_by_field_name = True

    # pydantic v1 custom root type: the model is just a constrained string
    __root__: str = Field(..., max_length=1024)
class ImportTask(BaseModel):
    """
    Information which describes the task created when asset data is imported.
    """

    class Config:
        allow_population_by_field_name = True

    # required task name, at most 100 characters
    name: str = Field(..., example="my import task", max_length=100)
    # optional free-text description, at most 1024 characters
    description: Optional[str] = Field(None, example="importing assets from custom integration A", max_length=1024)
    tags: Optional[List[Tag]] = Field(None, example=["tag1", "tag2"], max_items=100)
    """
    Arbitrary string tag values which are applied to the asset data import task created.
    """
class NewAssetImport(BaseModel):
    """
    Represents a request to import asset data described by the specified custom integration into the specified site.

    Assets will be created new or merged according to merge rules defined by the version of the platform
    you are uploading the asset data file to. Typically, this involves matching network and other unique
    single or grouped properties.

    There is a maximum of 256 custom asset properties that can be applied to any asset. This means
    that, aside from the per-import asset property limit set on ImportAsset, if a new import sets
    different custom properties on the same asset, the new properties are combined with the
    pre-existing ones.
    """

    class Config:
        # allow construction by field name as well as the camelCase alias
        allow_population_by_field_name = True

    site_id: UUID = Field(..., alias="siteId", example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the site assets are to be imported into.
    """
    custom_integration_id: UUID = Field(
        ..., alias="customIntegrationId", example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8"
    )
    """
    The unique ID of the registered custom integration which produced the asset data. Uniqueness is not checked/enforced. See /account/custom-integrations api.
    """
    import_task: ImportTask = Field(..., alias="importTask", title="ImportTask")
    """
    Information which describes the task created when asset data is imported.
    """
    asset_data: bytes = Field(..., alias="assetData")
    """
    A gzip (not .tar.gz) compressed file containing ImportAsset objects. The file data may be a JSON array of
    ImportAsset objects, e.g. [{},{},...] or JSONL format, with a single JSON representation of an ImportAsset
    object on each new line, e.g. {}\n{}\n...
    """
class NetworkInterface(BaseModel):
    """
    A network interface on an imported asset: optional IPv4/IPv6 addresses and a MAC address.
    """

    class Config:
        allow_population_by_field_name = True

    ipv4_addresses: Optional[List[IPv4Address]] = Field(None, alias="ipv4Addresses", max_items=256)
    """
    Represents IPV4 addresses. Addresses are ordered from most to least likely to uniquely identify the asset.
    """
    ipv6_addresses: Optional[List[IPv6Address]] = Field(None, alias="ipv6Addresses", max_items=100)
    """
    Represents the IPV6 addresses. Addresses are ordered from most to least likely to uniquely identify the asset.
    """
    mac_address: Optional[str] = Field(
        None,
        alias="macAddress",
        example="01:23:45:67:89:0A",
        max_length=23,
        # accepts colon-, hyphen-, dot-, and space-separated EUI-48/EUI-64 forms (see docstring below)
        regex=(
            "^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$|^([A-Fa-f0-9]{2}:){7}[A-Fa-f0-9]{2}$|^([A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2}$|^([A-Fa-f0-9]{2}-){7}[A-Fa-f0-9]{2}$|^([A-Fa-f0-9]{4}\\.){2}[A-Fa-f0-9]{4}$|^([A-Fa-f0-9]{4}\\.){3}[A-Fa-f0-9]{4}$|^([A-Fa-f0-9]{4}"
            " ){3}[A-Fa-f0-9]{4}$"
        ),
    )
    """
    Represents a MAC address in IEEE 802 MAC/EUI-48, or EUI-64 form in one of the following formats:

    01:23:45:67:89:AB
    01:23:45:67:89:ab:cd:ef
    01-23-45-67-89-ab
    01-23-45-67-89-ab-cd-ef
    0123.4567.89ab
    0123.4567.89ab.cdef
    0123 4567 89ab cdEF
    """
class Hostname(BaseModel):
    """A hostname value (short or fully-qualified), limited to 260 characters."""

    class Config:
        allow_population_by_field_name = True

    # pydantic v1 custom root type: the model is just a constrained string
    __root__: str = Field(..., example="host.domain.com", max_length=260)
class CustomAttribute(BaseModel):
    """A custom attribute string value, limited to 1024 characters."""

    class Config:
        allow_population_by_field_name = True

    # pydantic v1 custom root type: the model is just a constrained string
    __root__: str = Field(..., max_length=1024)
class ImportAsset(BaseModel):
    """
    Represents a custom asset to be created or merged after import.
    """

    class Config:
        # allow construction by field name as well as the camelCase alias
        allow_population_by_field_name = True

    id: str = Field(..., max_length=1024)
    """
    Any value which can uniquely identify the asset within the custom integration.
    """
    run_zero_id: Optional[UUID] = Field(None, alias="runZeroID", example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The unique identifier of the runZero asset to merge into.
    """
    network_interfaces: Optional[List[NetworkInterface]] = Field(
        None, alias="networkInterfaces", max_items=256, title="NetworkInterfaces"
    )
    """
    The asset's networking configuration.
    """
    hostnames: Optional[List[Hostname]] = Field(None, max_items=100)
    """
    Represents hostnames the asset is assigned or reachable at. These can be fully-qualified hostnames with the domain name, or a short hostname.
    """
    domain: Optional[str] = Field(None, example="domain.com", max_length=260)
    """
    Represents a single domain name which could be applied to all non-fqdns in the hostnames field.
    """
    first_seen_ts: Optional[datetime] = Field(None, alias="firstSeenTS", example="2023-03-06T18:14:50.52Z")
    """
    Represents the earliest time the asset was seen by the custom integration reporting it, using a date string as defined by RFC 3339, section 5.6.
    """
    os: Optional[str] = Field(None, example="Ubuntu Linux 22.04", max_length=1024)
    """
    The name of the asset's operating system. It is advisable to keep the data clean by normalizing to existing values when possible.
    """
    os_version: Optional[str] = Field(None, alias="osVersion", example="22.04", max_length=1024)
    """
    The version of the asset's operating system. It is advisable to keep the data clean by normalizing to existing values when possible.
    """
    manufacturer: Optional[str] = Field(None, example="Apple Inc.", max_length=1024)
    """
    The manufacturer of the operating system of the asset. It is advisable to keep the data clean by normalizing to existing values when possible.
    """
    model: Optional[str] = Field(None, example="Macbook Air", max_length=1024)
    """
    The hardware model of the asset. It is advisable to keep the data clean by normalizing to existing values when possible.
    """
    tags: Optional[List[Tag]] = Field(None, example=["foo", "key=value"], max_items=100)
    """
    Arbitrary string tags applied to the asset.
    """
    # free-form device classification, e.g. "Desktop"
    device_type: Optional[str] = Field(None, alias="deviceType", example="Desktop", max_length=1024)
    custom_attributes: Optional[Dict[str, CustomAttribute]] = Field(
        None, alias="customAttributes", title="CustomAttributes"
    )
    """
    Flat map of arbitrary string key/value pairs representing custom attribute data not described in properties above. Note the maximum number of keys and length of values. Additionally, property names may only be 256 characters long.
    """
class ScanFrequency(Enum):
    """
    A string time duration value representing execution frequency, if scheduled to repeat.
    """

    # serialized values are the lowercase member names
    once = "once"
    hourly = "hourly"
    daily = "daily"
    weekly = "weekly"
    monthly = "monthly"
    continuous = "continuous"
class ScanOptions(BaseModel):
    """
    Options which can be set to create or modify a scan.
    """

    # NOTE(review): most numeric options below are typed as str (see examples);
    # presumably the API expects string-encoded values — confirm before tightening types.
    class Config:
        allow_population_by_field_name = True

    targets: str = Field(..., example="defaults")
    excludes: Optional[str] = None
    scan_name: Optional[str] = Field(None, alias="scan-name", example="My Scan")
    scan_description: Optional[str] = Field(None, alias="scan-description", example="Scan of Wireless")
    """
    A description of the scan.
    """
    scan_template: Optional[UUID] = Field(None, alias="scan-template", example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    scan_frequency: Optional[ScanFrequency] = Field(None, alias="scan-frequency", example="hour")
    """
    A string time duration value representing execution frequency, if scheduled to repeat.
    """
    scan_start: Optional[str] = Field(None, alias="scan-start", example="0")
    """
    Unix timestamp value indicating when the template was created.
    """
    scan_tags: Optional[str] = Field(None, alias="scan-tags", example="owner=IT location=Texas")
    scan_grace_period: Optional[str] = Field(None, alias="scan-grace-period", example="4")
    agent: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    explorer: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    hosted_zone_id: Optional[str] = Field(None, alias="hosted-zone-id", example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The string 'auto' will use any available hosted zone. Otherwise, provide the string name (hostedzone1) or UUID (e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8) of a hosted zone.
    """
    hosted_zone_name: Optional[str] = Field(None, alias="hosted-zone-name", example="auto")
    """
    The string 'auto' will use any available hosted zone. Otherwise, provide the string name (hostedzone1) of the hosted zone.
    """
    rate: Optional[str] = Field(None, example="10000")
    max_host_rate: Optional[str] = Field(None, alias="max-host-rate", example="100")
    passes: Optional[str] = Field(None, example="3")
    max_attempts: Optional[str] = Field(None, alias="max-attempts", example="3")
    max_sockets: Optional[str] = Field(None, alias="max-sockets", example="500")
    max_group_size: Optional[str] = Field(None, alias="max-group-size", example="4096")
    max_ttl: Optional[str] = Field(None, alias="max-ttl", example="255")
    tos: Optional[str] = Field(None, example="255")
    tcp_ports: Optional[str] = Field(None, alias="tcp-ports", example="1-1000,5000-6000")
    tcp_excludes: Optional[str] = Field(None, alias="tcp-excludes", example="9500")
    screenshots: Optional[str] = Field(None, example="true")
    nameservers: Optional[str] = Field(None, example="8.8.8.8")
    subnet_ping: Optional[str] = Field(None, alias="subnet-ping", example="true")
    subnet_ping_net_size: Optional[str] = Field(None, alias="subnet-ping-net-size", example="256")
    subnet_ping_probes: Optional[str] = Field(
        None,
        alias="subnet-ping-probes",
        example="arp, echo, syn, connect, netbios, snmp, ntp, sunrpc, ike, openvpn, mdns",
    )
    """
    Optional subnet ping probe list as comma separated strings. The example shows possibilities.
    """
    subnet_ping_sample_rate: Optional[str] = Field(None, alias="subnet-ping-sample-rate", example="3")
    host_ping: Optional[str] = Field(None, alias="host-ping", example="false")
    host_ping_probes: Optional[str] = Field(
        None,
        alias="host-ping-probes",
        example="arp, echo, syn, connect, netbios, snmp, ntp, sunrpc, ike, openvpn, mdns",
    )
    """
    Optional host ping probe list as comma separated strings. The example shows possibilities.
    """
    probes: Optional[str] = Field(
        None,
        example="arp,bacnet,connect,dns,echo,ike,ipmi,mdns,memcache,mssql,natpmp,netbios,pca,rdns,rpcbind,sip,snmp,ssdp,syn,ubnt,wlan-list,wsd",
    )
    """
    Optional probe list, otherwise all probes are used
    """
class ScanTemplateOptions(BaseModel):
    """
    Options which can be set to create a scan template.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON
        # alias (required for aliased fields such as ``global_`` below).
        allow_population_by_field_name = True

    name: str = Field(..., example="My Scan Template")
    """
    Name of the template.
    """
    description: Optional[str] = Field(None, example="My Scan Template")
    """
    Description of the template.
    """
    organization_id: UUID = Field(..., example="f6cfb91a-52ea-4a86-bf9a-5a891a26f52b")
    """
    The ID of the organization the template will be created in
    """
    params: Optional[Dict[str, str]] = None
    """
    A number of scan parameter values. Currently there is no authoritative list of acceptable values. See existing templates for examples.
    """
    # ``global`` is a Python keyword, so the field is named ``global_`` and
    # serialized/deserialized through the alias "global".
    global_: bool = Field(..., alias="global", example=False)
    """
    Whether the template is globally available to all organizations.
    """
    acl: Dict[str, Any] = Field(..., example={"e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8": "user"})
    """
    A map of IDs to strings which describe how the template may be accessed. Currently there is no authoritative list of acceptable values. See existing templates for examples.
    """
class ScanTemplate(BaseModel):
    """
    A scan task template
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON
        # alias (required for aliased fields such as ``global_`` below).
        allow_population_by_field_name = True

    id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    ID of the template.
    """
    name: Optional[str] = Field(None, example="My Scan Template")
    """
    The name of the template.
    """
    description: Optional[str] = Field(None, example="My Scan Template")
    """
    The description of the template.
    """
    client_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    ID of the account which owns the template.
    """
    organization_id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    ID of the organization the template is available in.
    """
    agent_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    ID of the explorer which may execute the template.
    """
    site_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    ID of the site the template is being used in.
    """
    cruncher_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    ID of the runZero cruncher the task is executing on.
    """
    created_at: Optional[int] = Field(None, example=1576300370)
    """
    Unix timestamp value indicating when the template was created.
    """
    created_by: Optional[str] = Field(None, example="user@example.com")
    """
    The username of the account which created the template.
    """
    created_by_user_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the account which created the template.
    """
    updated_at: Optional[int] = Field(None, example=1576300370)
    """
    Unix timestamp value indicating when the template was last modified.
    """
    type: Optional[str] = Field(None, example="scan")
    """
    The type of task the template creates.
    """
    status: Optional[str] = Field(None, example="processed")
    """
    The status of the last task using the template.
    """
    error: Optional[str] = Field(None, example="agent unavailable")
    """
    The error message, if any, of the last task using the template.
    """
    params: Optional[Dict[str, str]] = None
    """
    A number of task parameter values. Currently there is no authoritative list of in-use values. See existing templates for examples.
    """
    stats: Optional[Dict[str, Any]] = None
    """
    A map of statistics about the last task executed with the template. Currently there is no authoritative list of in-use values. See existing templates for examples.
    """
    hidden: Optional[bool] = Field(None, example=False)
    """
    A flag indicating whether the item is hidden from common view.
    """
    parent_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the parent entity of the task scheduled.
    """
    recur: Optional[bool] = Field(None, example=False)
    """
    A flag representing whether derived tasks are scheduled to repeat.
    """
    recur_frequency: Optional[str] = Field(None, example="hourly")
    """
    A string time duration value representing execution frequency, if scheduled to repeat. You may use
    values including as once, hourly, daily, weekly, monthly, continuous
    """
    start_time: Optional[int] = Field(None, example=1576300370)
    """
    Unix timestamp representing the next execution time.
    """
    recur_last: Optional[int] = Field(None, example=1576300370)
    """
    Unix timestamp representing the last execution if scheduled to repeat.
    """
    recur_next: Optional[int] = Field(None, example=1576300370)
    """
    Unix timestamp representing the next execution if scheduled to repeat.
    """
    recur_last_task_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the task that last executed if scheduled to repeat.
    """
    grace_period: Optional[str] = Field(None, example="4")
    """
    Additional time beyond hard expiration deadline by which the task may still be allowed to execute.
    """
    source_id: Optional[str] = Field(None, example="1")
    """
    The numeric ID of the data source, if the task executed with this template is a runZero scan or third party data connection import.
    """
    custom_integration_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the custom integration source, if the last task executed with this template was an import of Asset Data.
    """
    template_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the template.
    """
    size_site: Optional[int] = Field(None, example=0)
    """
    The size in assets of the site the last task the template was executed against.
    """
    size_data: Optional[int] = Field(None, example=0)
    """
    The total size of result data of the last task the template was used with.
    """
    size_results: Optional[int] = Field(None, example=0)
    """
    The number of results in the last task the template was used with.
    """
    hosted_zone_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the hosted zone that ran the last task the template was used with.
    """
    linked_task_count: Optional[int] = Field(None, example=1)
    """
    The number of tasks derived from the template.
    """
    # ``global`` is a Python keyword, so the field is named ``global_`` and
    # serialized/deserialized through the alias "global".
    global_: bool = Field(..., alias="global", example=False)
    """
    Whether the template is globally available to all organizations.
    """
    acl: Dict[str, Any] = Field(..., example={"e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8": "user"})
    """
    A map of IDs to strings which describe how the template may be accessed. Currently there is no authoritative list of in-use values. See existing templates for examples.
    """
class Organization(BaseModel):
    """
    An organization record as returned by the runZero API, including
    download/export token metadata, rolled-up asset and service counts,
    and data-expiration settings.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    # Unix timestamps for creation / last modification.
    created_at: Optional[int] = Field(None, example=1576300370)
    updated_at: Optional[int] = Field(None, example=1576300370)
    client_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    download_token: Optional[str] = Field(None, example="DT11226D9EEEA2B035D42569585900")
    download_token_created_at: Optional[int] = Field(None, example=1576300370)
    permanent: Optional[bool] = Field(None, example=True)
    name: str = Field(..., example="My Company")
    description: Optional[str] = Field(None, example="All subdivisions of my company")
    inactive: Optional[bool] = Field(None, example=False)
    deactivated_at: Optional[int] = Field(None, example=0)
    # Rolled-up service counts, broken out by protocol class.
    service_count: Optional[int] = Field(None, example=10)
    service_count_tcp: Optional[int] = Field(None, example=7)
    service_count_udp: Optional[int] = Field(None, example=1)
    service_count_arp: Optional[int] = Field(None, example=1)
    service_count_icmp: Optional[int] = Field(None, example=1)
    asset_count: Optional[int] = Field(None, example=100)
    # Export token and its usage bookkeeping.
    export_token: Optional[str] = Field(None, example="ET11226D9EEEA2B035D42569585900")
    export_token_created_at: Optional[int] = Field(None, example=1576300370)
    export_token_last_used_at: Optional[int] = Field(None, example=0)
    export_token_last_used_by: Optional[str] = Field(None, example="127.0.0.1")
    export_token_counter: Optional[int] = Field(None, example=0)
    project: Optional[bool] = Field(None, example=False)
    parent_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    # Data-retention windows; presumably days (example 365) — confirm against API docs.
    expiration_assets_stale: Optional[int] = Field(None, example=365)
    expiration_assets_offline: Optional[int] = Field(None, example=365)
    expiration_scans: Optional[int] = Field(None, example=365)
class Site(BaseModel):
    """
    A site record: a named scan scope with optional excludes and
    per-subnet data.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    created_at: Optional[int] = Field(None, example=1576300370)
    updated_at: Optional[int] = Field(None, example=1576300370)
    permanent: Optional[bool] = Field(None, example=True)
    name: str = Field(..., example="Primary")
    description: Optional[str] = Field(None, example="Headquarters")
    # Addresses included in / excluded from the site scope (CIDR or host, per examples).
    scope: Optional[str] = Field(None, example="192.168.0.0/24")
    excludes: Optional[str] = Field(None, example="192.168.0.5")
    subnets: Optional[Dict[str, Any]] = None
class SiteOptions(BaseModel):
    """
    Options which can be set to create or modify a site.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    name: str = Field(..., example="New Site")
    description: Optional[str] = Field(None, example="County Office")
    # Addresses included in / excluded from the site scope (CIDR or host, per examples).
    scope: Optional[str] = Field(None, example="192.168.10.0/24")
    excludes: Optional[str] = Field(None, example="192.168.10.1")
class OrgOptions(BaseModel):
    """
    Options which can be set to create or modify an organization.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    name: Optional[str] = Field(None, example="My Organization")
    description: Optional[str] = Field(None, example="Wobbly Widgets, Inc.")
    export_token: Optional[str] = Field(None, example="ETXXXXXXXXXXXXXXXX")
    parent_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    # Retention values are digit-only strings (enforced by the regex), unlike the
    # integer fields returned on Organization.
    expiration_assets_stale: Optional[str] = Field(None, example="365", regex="^\\d+$")
    expiration_assets_offline: Optional[str] = Field(None, example="365", regex="^\\d+$")
    expiration_scans: Optional[str] = Field(None, example="365", regex="^\\d+$")
class Agent(BaseModel):
    """
    A deployed service which performs scan tasks.
    Explorers may be referred to by their legacy name, Agents.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    client_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    organization_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    # Unix timestamps for creation / last modification / last check-in.
    created_at: Optional[int] = Field(None, example=1576300370)
    updated_at: Optional[int] = Field(None, example=1576300370)
    # Opaque host identifier (hex string per example).
    host_id: Optional[str] = Field(None, example="6f9e6fe52271da70962e007183c5c9c9")
    hub_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    name: Optional[str] = Field(None, example="RUNZERO-AGENT")
    site_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    last_checkin: Optional[int] = Field(None, example=1576300370)
    # Host platform details reported by the explorer.
    os: Optional[str] = Field(None, example="Windows")
    arch: Optional[str] = Field(None, example="amd64")
    version: Optional[str] = Field(
        None, example="1.2.3 (build 20191219224016) [fc50c5eefdc3ff5c60533c3c345d14d336396272]"
    )
    external_ip: Optional[str] = Field(None, example="1.1.1.1")
    internal_ip: Optional[str] = Field(None, example="192.168.0.1")
    system_info: Optional[Dict[str, Any]] = None
    connected: Optional[bool] = Field(None, example=True)
    inactive: Optional[bool] = Field(None, example=False)
    deactivated_at: Optional[int] = Field(None, example=0)
class AgentSiteID(BaseModel):
    """
    A wrapper model holding a single required site ID for an agent.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    site_id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
class Explorer(BaseModel):
    """
    Alias model for :class:`Agent` under its current product name, Explorer.

    Implemented as a pydantic custom root type, so it validates and
    serializes exactly like :class:`Agent`.
    """

    class Config:
        allow_population_by_field_name = True

    __root__: Agent
class ExplorerSiteID(BaseModel):
    """
    Alias model for :class:`AgentSiteID` under the current product name, Explorer.

    Implemented as a pydantic custom root type, so it validates and
    serializes exactly like :class:`AgentSiteID`.
    """

    class Config:
        allow_population_by_field_name = True

    __root__: AgentSiteID
class TaskBase(BaseModel):
    """
    All fields of a Task with none required.

    Serves as the shared base for :class:`Task` (which makes ``id`` required)
    and :class:`TaskOptions`.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    name: Optional[str] = Field(None, example="Hourly Scan")
    description: Optional[str] = Field(None, example="Scan the headquarters hourly")
    template_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    client_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    organization_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    agent_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    hosted_zone_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the Hosted Zone which executes the task.
    """
    site_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    cruncher_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    # Unix timestamps and account details for creation / last modification.
    created_at: Optional[int] = Field(None, example=1576300370)
    created_by: Optional[str] = Field(None, example="user@example.com")
    created_by_user_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    custom_integration_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the custom integration source, if the last task executed with this template was an import of Asset Data.
    """
    source_id: Optional[int] = Field(None, example=1)
    """
    The numeric ID of the data source, if the task executed with this template is a runZero scan or third party data connection import.
    """
    updated_at: Optional[int] = Field(None, example=1576300370)
    type: Optional[str] = Field(None, example="scan")
    status: Optional[str] = Field(None, example="processed")
    error: Optional[str] = Field(None, example="agent unavailable")
    params: Optional[Dict[str, str]] = None
    stats: Optional[Dict[str, Any]] = None
    hidden: Optional[bool] = Field(None, example=False)
    parent_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    # Recurrence configuration for repeating tasks; timestamps are unix seconds.
    recur: Optional[bool] = Field(None, example=False)
    recur_frequency: Optional[str] = Field(None, example="hourly")
    start_time: Optional[int] = Field(None, example=1576300370)
    recur_last: Optional[int] = Field(None, example=1576300370)
    recur_next: Optional[int] = Field(None, example=1576300370)
    recur_last_task_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
class Task(TaskBase):
    """
    A task object
    """

    class Config:
        allow_population_by_field_name = True

    # Narrows the optional TaskBase.id down to a required field.
    id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
class TaskOptions(TaskBase):
    """
    Options which can be set to create or modify a task.

    Inherits every (optional) field of :class:`TaskBase` and adds the
    hosted-zone name selector below.
    """

    class Config:
        allow_population_by_field_name = True

    hosted_zone_name: Optional[str] = Field(None, example="auto")
    """
    The string 'auto' will use any available hosted zone. Otherwise, provide the string name (hostedzone1) of the hosted zone.
    """
class HostedZone(BaseModel):
    """
    A hosted service which performs scan tasks. Hosted zones are only available to
    Enterprise customers.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    id: UUID = Field(..., example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the hosted zone
    """
    name: Optional[str] = Field(None, example="zone1")
    enabled: Optional[bool] = Field(None, example=True)
    """
    Whether the hosted zone is enabled
    """
    # NOTE: unlike the integer unix timestamps used elsewhere in this module,
    # this field is a datetime (ISO-8601 string in the API, per the example).
    updated_at: Optional[datetime] = Field(None, example="2023-03-06T18:14:50.52Z")
    """
    The last modification time of the hosted zone
    """
    processor_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The processor ID assigned to the hosted zone
    """
    explorers_concurrency: Optional[int] = Field(None, example=0)
    """
    The number of concurrent explorer tasks that can be executed
    """
    explorers_total: Optional[int] = Field(None, example=0)
    """
    The number of explorers available in the zone
    """
    tasks_active: Optional[int] = Field(None, example=0)
    """
    The number of tasks executing in the zone
    """
    tasks_waiting: Optional[int] = Field(None, example=0)
    """
    The number of tasks waiting to execute in the zone
    """
    organization_id: Optional[UUID] = Field(None, example="e77602e0-3fb8-4734-aef9-fbc6fdcb0fa8")
    """
    The ID of the organization the hosted zone is assigned to
    """
class Problem(BaseModel):
    """
    RFC7807 Problem JSON object from https://opensource.zalando.com/restful-api-guidelines/models/problem-1.0.1.yaml without the standard 'type' and 'instance' fields.
    """

    class Config:
        # Allow building instances by python field name as well as by the JSON alias.
        allow_population_by_field_name = True

    title: Optional[str] = Field(None, example="some title for the error situation")
    """
    A short summary of the problem type. Written in English and readable for engineers, usually not suited for non technical stakeholders and not localized.
    """
    # Constrained to the valid HTTP status code range [100, 600).
    status: Optional[int] = Field(None, ge=100, lt=600)
    """
    The HTTP status code generated by the origin server for this occurrence of the problem.
    """
    detail: Optional[str] = Field(None, example="some description for the error situation")
    """
    A human readable explanation specific to this occurrence of the problem that is helpful to locate the problem and give advice on how to proceed. Written in English and readable for engineers, usually not suited for non technical stakeholders and not localized.
    """
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Optional
@dataclass
class RateLimitInformation:
    """Describes rate limits to API users.

    See https://www.runzero.com/docs/leveraging-the-api/#api-client-credentials for details.

    :param usage_limit: int, optional which holds the number of API requests possible. The value is related to
        the license associated with the caller.
    :type usage_limit: int or None
    :param usage_remaining: int, optional which holds the number of API requests left before the request
        is rejected by the server with a rate limit error message.
    :type usage_remaining: int or None
    :param usage_today: int, optional which holds the number of API requests made in the current day period.
    :type usage_today: int or None
    :param usage_total: int, optional which holds the number of API requests made with the given credential.
    :type usage_total: int or None
    """

    usage_limit: Optional[int]
    usage_remaining: Optional[int]
    usage_today: Optional[int]
    usage_total: Optional[int]

    @classmethod
    def from_headers(cls, headers: Any) -> RateLimitInformation:
        """Build rate-limit information from HTTP response headers.

        Returns an instance with all fields set to None when the expected
        headers are absent or hold non-numeric values.
        """
        try:
            limit = int(headers["X-API-Usage-Limit"])
            remaining = int(headers["X-API-Usage-Remaining"])
            today = int(headers["X-API-Usage-Today"])
            total = int(headers["X-API-Usage-Total"])
        except (AttributeError, ValueError, KeyError):
            # unlikely, but requests defines headers as 'any' though it is
            # typically their own case-insensitive dict type.
            # and the runZero server should always return numeric data
            return cls(
                usage_limit=None,
                usage_remaining=None,
                usage_today=None,
                usage_total=None,
            )
        return cls(
            usage_limit=limit,
            usage_remaining=remaining,
            usage_today=today,
            usage_total=total,
        )

    def __str__(self) -> str:
        """A friendly string"""
        return (
            f"Limit: {self.usage_limit}, Remaining: {self.usage_remaining}, "
            f"Usage Today: {self.usage_today}, Total Usage: {self.usage_total}"
        )
from typing import Any, Iterable, Optional
from pydantic import Field
from ._data_models_gen import CustomAttribute as RESTCustomAttribute
from ._data_models_gen import CustomIntegration as RESTCustomIntegration
from ._data_models_gen import Hostname as RESTHostname
from ._data_models_gen import ScanOptions as RESTScanOptions
from ._data_models_gen import ScanTemplate as RESTScanTemplate
from ._data_models_gen import ScanTemplateOptions as RESTScanTemplateOptions
from ._data_models_gen import Tag as RESTTag
class CustomIntegration(RESTCustomIntegration):
    """CustomIntegration represents a custom asset data source for custom integrations use"""

    # The REST API uses base-64 encoded strings, but inside this SDK
    # we want always use bytes and hide the transport encoding.
    # Note that it's unsafe to override types in a subclass
    # according to mypy - it would be better to compose than
    # inherit. But we are wrapping our own API :)
    # pydantic enforces max_length against the byte string during validation.
    icon: Optional[bytes] = Field(  # type: ignore[assignment]
        None,
        max_length=200000,
    )
    """
    bytes of png formatted image with maximum size 32x32 pixels
    """
class CustomAttribute(RESTCustomAttribute):
    """
    CustomAttribute is a string key / value pair from an external custom asset data source.
    """

    def __init__(self, attr: str):
        """Wrap the plain string in the generated REST custom-root model."""
        super().__init__(**{"__root__": attr})
class Hostname(RESTHostname):
    """
    Hostname the dns name the asset is assigned or reachable at.

    This can be a fully-qualified hostname with the domain name, or
    a short hostname.
    """

    def __init__(self, hostname: str):
        """Wrap the plain string in the generated REST custom-root model."""
        super().__init__(**{"__root__": hostname})
class Tag(RESTTag):
    """
    Tag is an arbitrary string classifier applied to the asset.
    """

    def __init__(self, tag: str):
        """Wrap the plain string in the generated REST custom-root model."""
        super().__init__(**{"__root__": tag})
class ScanOptions(RESTScanOptions):
    """Options which can be set to create or modify a scan."""

    def json(self, *args: Iterable[Any], **kwargs: Any) -> str:
        """Serialize to JSON, defaulting to alias (kebab-case) field names."""
        # Preserve an explicit caller choice; otherwise emit kebab-case aliases.
        if "by_alias" not in kwargs:
            kwargs["by_alias"] = True
        return super().json(*args, **kwargs)
class ScanTemplate(RESTScanTemplate):
    """A scan template object"""

    def json(self, *args: Iterable[Any], **kwargs: Any) -> str:
        """Serialize to JSON, defaulting to alias (kebab-case) field names."""
        # Preserve an explicit caller choice; otherwise emit kebab-case aliases.
        if "by_alias" not in kwargs:
            kwargs["by_alias"] = True
        return super().json(*args, **kwargs)
class ScanTemplateOptions(RESTScanTemplateOptions):
    """Options which can be set to create or modify a scan template."""

    def json(self, *args: Iterable[Any], **kwargs: Any) -> str:
        """Serialize to JSON, defaulting to alias (kebab-case) field names."""
        # Preserve an explicit caller choice; otherwise emit kebab-case aliases.
        if "by_alias" not in kwargs:
            kwargs["by_alias"] = True
        return super().json(*args, **kwargs)
from __future__ import annotations
from enum import Enum
from typing import Any, Optional
from urllib.parse import urlparse
import requests
from pydantic import BaseModel
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ConnectTimeout as RequestsConnectTimeout
from requests.exceptions import ContentDecodingError
from requests.exceptions import HTTPError as RequestsHTTPError
from runzero.types import RateLimitInformation
from ._http.auth import OAuthToken, RegisteredAPIClient
from ._http.io import Request, Response
from .errors import AuthError
class Client:
    """
    The authenticated connection to your runZero service destination.

    A client must be built and provided to objects which interact
    with the runZero API. It is responsible for authentication
    and communication, and instantiation should be your first step
    in doing just about anything in this SDK.

    :param account_key: Optional account key, sometimes known as client key, which
        is a high-privilege key which grants the holder the rights of an org_key across
        all organizations in the account, as well as additional administrative actions.
        Account keys are 30-character hexadecimal strings that start with 'CT'. Set this value,
        use an org_key, or use OAuth by calling oauth_login(). OAuth should be
        preferred.
    :param org_key: Optional organization key which grants the holder rights
        to operations confined to a specific runZero organization.
        Org keys are 30-character hexadecimal strings that start with 'OT'. Set this value,
        use an account_key, or use OAuth by calling oauth_login(). OAuth should be
        preferred.
    :param server_url: Optional URL to the server hosting the API. Self-hosted API
        server targets must provide the server url in string form,
        e.g. 'https://runzero.local:8443'
        If not provided, the default hosted infrastructure URL
        'https://console.runzero.com' is used.
    :type server_url: str
    :param validate_certificate: Optional bool to change whether Client checks
        the validity of the API server's certificate before proceeding. We recommend
        not setting this to false unless you are doing local development or testing.
        Ignoring certificate validation errors can result in credential theft or other
        bad outcomes.
    :type validate_certificate: bool
    """

    # Class-level defaults applied when the constructor args are omitted.
    __default_timeout__ = 30
    __default_server_url__ = "https://console.runzero.com"

    class _Paths(str, Enum):
        """Enum of resource paths for the runZero APIs"""

        TOKEN = "api/v1.0/account/api/token"

    class _AuthScope(Enum):
        """Enum of auth scopes for the runZero APIs"""

        ACCOUNT = 1
        ORG = 2

    def __init__(
        self,
        account_key: Optional[str] = None,
        org_key: Optional[str] = None,
        server_url: Optional[str] = None,
        timeout_seconds: Optional[int] = None,
        validate_certificate: Optional[bool] = None,
    ):
        """Constructor method"""
        # Credentials are name-mangled (double underscore) to discourage
        # accidental external access to secrets.
        self.__account_key: Optional[str] = account_key
        self.__org_key: Optional[str] = org_key
        # OAuth state: populated by oauth_login(); _use_token switches the
        # auth resolution path in _get_auth_token().
        self._use_token: bool = False
        self.__client_id: Optional[str] = None
        self.__client_secret: Optional[str] = None
        self.__token: Optional[OAuthToken] = None
        server_url = server_url or self.__default_server_url__
        parsed = urlparse(server_url)
        # Require a full scheme://host URL, and refuse plaintext http to
        # avoid sending credentials in the clear.
        if not all([parsed.scheme, parsed.netloc]):
            raise ValueError(f"Url {server_url} is not valid")
        if not parsed.scheme == "https":
            raise ValueError(f"Url {server_url} must be https")
        self.server_url = server_url
        if timeout_seconds is not None and timeout_seconds <= 0:
            raise ValueError("Timeout must be greater than 0")
        self._timeout = timeout_seconds or self.__default_timeout__
        if validate_certificate is None:
            self._validate_cert = True
        else:
            self._validate_cert = validate_certificate
        # Updated after every request; see last_rate_limit_information.
        self._rate_limit_information: Optional[RateLimitInformation] = None

    @property
    def oauth_token_is_expired(self) -> bool:
        """Returns true if the oauth token is no longer valid.

        Note that the token is automatically refreshed for you when
        possible. This value doesn't need to be checked and manually
        refreshed in most cases.

        :returns: bool: indicating whether the oauth token is expired.
            If OAuth is not in-use with the client, value is always false.
        """
        if self.__token:
            return self.__token.is_expired()
        return False

    @property
    def oauth_active(self) -> bool:
        """
        Returns true if the OAuth is in use.

        This happens when oauth_login was called successfully

        :returns: bool: indicating whether the oauth token is expired.
            If there is no oauth used with the client, value is always false.
        """
        return self._use_token

    def oauth_login(self, client_id: str, client_secret: str) -> None:
        """
        Registers the runZero SDK client using OAuth credentials, and
        enables the Client to use OAuth.

        To obtain a client ID and client secret, see the API Clients area
        of the product. Generation of these values is restricted to account
        administrators.

        :param client_id: The client ID for the runZero registered API client
        :param client_secret: The client secret for the runZero registered API client
        :raises: AuthError: Exception for invalid OAuth configurations
        """
        self.__client_id = client_id
        self.__client_secret = client_secret
        self._use_token = True
        # _login() returns None; 'return' simply propagates completion.
        return self._login()

    @property
    def url(self) -> str:
        """The url of the server

        :returns: str: The URL of the runZero server
        """
        return self.server_url

    # This is only used if the auth type is a registered api client
    def _login(self) -> None:
        """
        Attempts to use the client secret and client id to generate an OAuth token.

        :raises: AuthError
        """
        if not self._use_token or (self.__client_id is None or self.__client_secret is None):
            raise AuthError("invalid auth configuration")
        try:
            resp = requests.post(
                f"{self.server_url}/{self._Paths.TOKEN.value}",
                data=RegisteredAPIClient(self.__client_id, self.__client_secret).register(),
                timeout=self._timeout,
                verify=self._validate_cert,
            )
            # raise_for_status turns 4xx/5xx responses into RequestsHTTPError,
            # which is mapped to AuthError below.
            resp.raise_for_status()
            # NOTE(review): object_hook applies to *every* JSON object in the
            # payload; this assumes the token endpoint returns a flat object —
            # confirm against the API response shape.
            self.__token = resp.json(object_hook=OAuthToken.parse_obj)
        except (
            RequestsConnectTimeout,
            RequestsConnectionError,
            RequestsHTTPError,
            ContentDecodingError,
            ConnectionRefusedError,
        ) as exc:
            raise AuthError("failed to authenticate") from exc

    def _get_auth_token(self, scope: _AuthScope) -> str:
        """
        Validates and resolves the bearer token to use depending on the provided API scope.
        Will also refresh the OAuth token if it's about to expire.

        :param scope: Authentication scope for the requested credential to resolve
        :returns: Bearer token for the required API scope
        :rtype string
        :raises: AuthError
        """
        self._validate_scope_permissions(scope)
        if self._use_token:
            if self.__token is not None:
                # this handles refreshing the token if necessary
                if self.__token.is_expired():
                    self._login()
                return self.__token.access_token
        # Key precedence: an account key satisfies both scopes; an org key
        # only satisfies ORG scope.
        if scope == self._AuthScope.ACCOUNT:
            if self.__account_key is not None:
                return self.__account_key
        if scope == self._AuthScope.ORG:
            if self.__account_key is not None:
                return self.__account_key
            if self.__org_key is not None:
                return self.__org_key
        raise AuthError("invalid credential configurations")

    def _validate_scope_permissions(self, scope: _AuthScope) -> None:
        # Raises AuthError when the configured credentials cannot possibly
        # satisfy the requested scope; returns None otherwise.
        if self._use_token:
            if self.__token is None:
                raise AuthError("no valid OAuth token")
            return
        if scope is self._AuthScope.ACCOUNT:
            if self.__account_key is None:
                raise AuthError("missing account key")
            return
        if scope is self._AuthScope.ORG:
            if self.__org_key is None:
                if self.__account_key is None:
                    raise AuthError("missing organization or account key")
            return

    @property
    def last_rate_limit_information(self) -> Optional[RateLimitInformation]:
        """
        The last rate limit information retrieved from the server.

        :returns: Rate limit information when provided.
        """
        return self._rate_limit_information

    @property
    def timeout(self) -> int:
        """
        The set request timeout value in seconds

        :returns: timeout in seconds
        """
        return self._timeout

    @property
    def validate_cert(self) -> bool:
        """
        Boolean indicating whether the https cert must valid before proceeding.

        :returns: true if certficate information is validated, false if not
        """
        return self._validate_cert

    def execute(
        self,
        method: str,
        endpoint: str,
        params: Optional[Any] = None,
        data: Optional[BaseModel] = None,
        files: Optional[Any] = None,
        multipart: Optional[bool] = None,
    ) -> Response:
        """Executes the request

        :param method: The REST verb to use
        :param endpoint: The path to execute against
        :param params: URL query parameters
        :param data: The data to send in form body (POST, PATCH, PUT)
        :param files: For multipart form data or file uploads. Format varies.
        :param multipart: True if using a multipart form data (combination file[s] and form data)
        :returns: The result of the execution as class:.`Response`
        :raises: ValidationError, ConnTimeoutError, ConnError, CommunicationError
        """
        # Prefer the broader ACCOUNT-scope credential; fall back to ORG scope
        # (which may itself raise AuthError if nothing is configured).
        token: str = ""
        try:
            token = self._get_auth_token(self._AuthScope.ACCOUNT)
        except AuthError:
            pass
        if not token:
            token = self._get_auth_token(self._AuthScope.ORG)
        form_data = None
        if data:
            form_data = data.json()
        resp = Request(
            url=f"{self.url}/{endpoint}",
            token=token,
            method=method,
            handlers=None,
            params=params,
            timeout=self.timeout,
            validate_certificate=self.validate_cert,
            data=form_data,
            files=files,
            multipart=multipart,
        ).execute()
        # Remember the server's rate-limit headers for callers to inspect.
        self._rate_limit_information = resp.rate_limit_information
        return resp
from __future__ import annotations
from typing import Optional
from runzero.errors import APIError, Error
from runzero.types import RateLimitInformation
from runzero.types.errors import RFC7807Error
class UnsupportedRequestError(ValueError, Error):
    """
    UnsupportedRequestError is a named Exception class representing any Error from the runZero API
    which cannot be properly interpreted into a friendlier form.
    """

    def __init__(self, message: str):
        """Constructor method"""
        super().__init__(message)
class ErrInfo(RFC7807Error):
    """runZero's implementation of RFC7807 JSON error description"""

    def __init__(self, detail: str, status: int, title: str):
        """Build the RFC7807 body from its three component fields."""
        super().__init__(detail=detail, status=status, title=title)

    def __repr__(self) -> str:
        """Debug representation listing detail, status, and title."""
        return "Error: Details:{}, Status:{}, Title:{}".format(self.detail, self.status, self.title)
class ClientError(APIError):
    """
    Exception holding 400-level HTTP status code messages.

    A ClientError means the server rejected the request as made; fix the
    request rather than retrying it unchanged.

    :param message: A top-level error description. The default value None provides a
        reasonable message.
    :param unparsed_response: a string which holds the unparsed response body.
    :type unparsed_response: str, optional
    :param error_info: :class:`.ErrInfo`, optional, which holds message data parsed
        from the server's response.
    :type error_info: ErrInfo
    """

    def __init__(
        self,
        message: Optional[str] = None,
        unparsed_response: Optional[str] = None,
        error_info: Optional[ErrInfo] = None,
    ):
        """Constructor method"""
        # Normalize whitespace; fall back to a generic message when nothing
        # useful remains (None or an all-whitespace string).
        cleaned = message.strip() if message else message
        super().__init__(cleaned or "The request was rejected by the server.")
        self.error_info: Optional[ErrInfo] = error_info
        self.unparsed_response: Optional[str] = unparsed_response

    def __str__(self) -> str:
        """Return the message, with parsed error details appended when available."""
        text = super().__str__().strip()
        if self.error_info:
            return f"{text}: {self.error_info}"
        return text
class ServerError(APIError):
    """
    ServerError is a named Exception class for holding 500 level http status code messages.

    A ServerError indicates nothing about the way the request was performed. The server cannot
    complete the task. You should retry or abort.

    :param error_info: :class:`.ErrInfo`, optional which holds message data parsed from the
        server's response.
    :type error_info: ErrInfo
    :param message: A top-level error description. The default value None provides a reasonable
        message.
    :param unparsed_response: optional string which holds the unparsed response body.
    :type unparsed_response: str, optional
    """

    def __init__(
        self,
        message: Optional[str] = None,
        unparsed_response: Optional[str] = None,
        error_info: Optional[ErrInfo] = None,
    ):
        """Constructor method"""
        if message:
            message = message.strip()
        if not message:
            # Fixed grammar of the default message ("encounter" -> "encountered").
            message = "The server encountered an error or is unable to process the request."
        super().__init__(message)
        self.error_info: Optional[ErrInfo] = error_info
        self.unparsed_response: Optional[str] = unparsed_response

    def __str__(self) -> str:
        """Provide a friendly, printable error string. Otherwise, only 'message' is printed."""
        # .strip() added for consistency with ClientError.__str__.
        out = f"{super().__str__()}".strip()
        if self.error_info:
            out = f"{out}: {self.error_info}"
        return out
class AuthError(APIError):
    """
    AuthError is a named Exception class for authentication issues with the runZero SDK client.

    Common types of authentication issues are:

    * Incorrect credentials
    * Misconfigured credentials
    * Missing credentials
    """

    pass
class RateLimitError(APIError):
    """
    Exception for errors resulting from API rate limiting.

    See https://www.runzero.com/docs/leveraging-the-api/#api-client-credentials for details.
    Consider an exponential backoff retry, or a more calculated approach by examining the
    returned numbers.

    :param rate_limit_information: a RateLimitInformation object which holds the rate limit data
    :type rate_limit_information: RateLimitInformation
    :param message: A top-level error description. The default value None provides a
        reasonable message.
    :param unparsed_response: optional string which holds the unparsed response body.
    :type unparsed_response: str, optional
    """

    def __init__(
        self,
        rate_limit_information: RateLimitInformation,
        message: Optional[str] = None,
        unparsed_response: Optional[str] = None,
    ):
        """Constructor method"""
        fallback = (
            "Too many API requests for licensed rate limit. See runZero documentation for details on API "
            "rate limiting."
        )
        message = message or fallback
        super().__init__(message)
        self.message = message
        self.unparsed_response: Optional[str] = unparsed_response
        self.rate_limit_information: RateLimitInformation = rate_limit_information

    def __str__(self) -> str:
        """Append the observed rate-limit numbers to the message."""
        return f"{self.message} Rate limit information: {self.rate_limit_information}"
class UnknownAPIError(APIError):
    """
    UnknownAPIError is a named Exception class raised when the response indicates a structured
    error message that cannot be parsed.

    Effort is made to receive and interpret errors returned from runZero services. These
    errors should be rare to non-existent.

    :param message: A top-level error description. The default value None provides a reasonable
        message.
    :param unparsed_response: optional string which holds the unparsed response body.
    :type unparsed_response: str, optional
    """

    def __init__(self, message: Optional[str] = None, unparsed_response: Optional[str] = None):
        """Constructor method"""
        if not message:
            # Fixed grammar of the default message ("encounter" -> "encountered").
            message = "The server encountered an error or is unable to process the request."
        super().__init__(message)
        self.unparsed_response: Optional[str] = unparsed_response
class CommunicationError(Error):
    """
    CommunicationError is a named Exception class raised when an API request to runZero
    service cannot complete due to a protocol-level error.
    """

    pass
class ConnError(Error, ConnectionError):
    """
    ConnError is a named Exception class raised when an API request to runZero
    service cannot complete due to a packet-level error.
    """

    pass
class ConnTimeoutError(Error, TimeoutError):
    """
    ConnTimeoutError is a named Exception class raised when an API request to runZero
    service cannot complete due a failure to create or maintain a connection to a
    runZero resource. The timeout value of the Client can be adjusted.
    """

    pass
from datetime import datetime
from typing import Dict
import requests
from pydantic import BaseModel, Field
from requests.auth import AuthBase
class OAuthToken(BaseModel):
    """Handles OAuth tokens for the runZero platform."""

    access_token: str = Field(...)
    token_type: str = Field(...)
    # Lifetime of the token in seconds, relative to created_at.
    expires_in: int = Field(...)
    # Bug fix: the previous `created_at: datetime = datetime.now()` default was
    # evaluated once at import time, so every token shared the same stale
    # timestamp. default_factory evaluates per instance.
    created_at: datetime = Field(default_factory=datetime.now)

    def is_expired(self) -> bool:
        """
        Determines if the oauth token is expired or will expire within a minute

        :returns: Returns a bool of whether the token is expired or about to
        """
        # Bug fix: the original subtracted datetime.now() from created_at
        # (yielding a negative duration) and inverted the comparison, so it
        # always returned False. Compare elapsed lifetime against the token's
        # validity window less a 60-second safety margin.
        elapsed = (datetime.now() - self.created_at).total_seconds()
        return elapsed >= self.expires_in - 60
class BearerToken(AuthBase):
    """Implements the bearer-token authentication scheme for requests."""

    def __init__(self, token: str):
        self._token: str = token

    def __call__(self, r: requests.PreparedRequest) -> requests.PreparedRequest:
        """
        Attaches the bearer token to the request headers

        :param r: the calling requests object - which is a requests.PreparedRequest
        :returns: the calling object
        """
        r.headers["Authorization"] = "Bearer " + self._token
        return r
class RegisteredAPIClient(AuthBase):
    """Handles the runZero API client registration to retrieve a bearer token."""

    def __init__(self, client_id: str, client_secret: str):
        self._client_id: str = client_id
        self._client_secret: str = client_secret

    def __call__(self, r: requests.PreparedRequest) -> requests.PreparedRequest:
        """
        Attach appropriate headers to the request for api client registration

        :param r: the calling requests object - which is a requests.PreparedRequest
        :returns: the calling object
        """
        r.headers["Content-Type"] = "application/x-www-form-urlencoded"
        return r

    def register(self) -> Dict[str, str]:
        """
        Uses the provided OAuth credentials to construct the url for requesting the OAuth bearer token.

        :returns: dict containing the required components to be urlencoded for OAuth authentication
        """
        return {
            "grant_type": "client_credentials",
            "client_id": self._client_id,
            "client_secret": self._client_secret,
        }
from ruobr_api.exceptions import (
AuthenticationException,
NoChildrenException,
NoSuccessException,
)
import httpx
import base64
from datetime import date, datetime
from typing import List, Union
class Ruobr(object):
    """Synchronous client for the Ruobr e-diary API.

    Credentials are stored base64-encoded, as the API expects. Authorization
    happens lazily on the first data call; for parent accounts select the
    active child via the ``child`` index.
    """

    def __init__(self, username: str, password: str):
        # The API expects login/password base64-encoded; the login is
        # upper-cased first (server-side convention).
        self.username = base64.b64encode(username.upper().encode("UTF-8")).decode(
            "UTF-8"
        )
        self.password = base64.b64encode(password.encode("UTF-8")).decode("UTF-8")
        self.is_applicant = None  # whether this is a parent account
        self.is_authorized = False  # whether the account has been authorized
        self.is_empty = None  # whether the account has no children
        self.child = 0  # index of the selected child for parent accounts
        self._children = None

    def _check_authorized(self):
        # Lazily authorize on first use.
        if not self.is_authorized:
            self.get_user()

    def _check_empty(self):
        # Child-scoped endpoints cannot be queried without a child record.
        if self.is_empty:
            raise NoChildrenException("На аккаунте нет детей")

    @property
    def user(self) -> dict:
        """The currently selected child's record, or None before authorization."""
        if self.is_authorized and not self.is_empty:
            return self._children[self.child]
        return None

    def _get(self, target: str) -> dict:
        """Perform an authenticated GET and decode/validate the JSON response."""
        response = httpx.get(
            f"https://api3d.ruobr.ru/{target}",
            headers={"password": self.password, "username": self.username},
        )
        try:
            response = response.json()
        # Fixed: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            raise NoSuccessException(response.text)
        if isinstance(response, dict):
            if "success" in response.keys():
                if not (response["success"]):
                    if "error" in response.keys():
                        raise NoSuccessException(response["error"])
                    if "error_type" in response.keys():
                        # not sure this still works
                        if response["error_type"] == "auth":
                            raise AuthenticationException(
                                "Проверьте логин и/или пароль"
                            )
                        raise NoSuccessException(response["error_type"])
                    raise NoSuccessException(response)
        return response

    def get_user(self) -> dict:
        """Authorize and return information about the student.

        After authorization the data is available via the ``user`` property.
        For parent accounts, set ``child`` to select a specific child."""
        if self.user is not None:
            return self.user
        user = self._get("user/")
        if not user:
            raise AuthenticationException("Проверьте логин и/или пароль")
        if user["status"] == "applicant":
            self.is_applicant = True
            self._children = user["childs"]
        else:
            self.is_applicant = False
            self._children = [user]
        self.is_authorized = True
        self.is_empty = len(self._children) == 0
        return self.user

    def get_children(self) -> List[dict]:
        """Return the list of children on this account (for parent accounts)."""
        self._check_authorized()
        return self._children

    def get_mail(self) -> List[dict]:
        """Return the mailbox.

        If a message has type_id == 2, last_msg_text contains HTML markup."""
        self._check_authorized()
        return self._get("mail/")["messages"]

    def get_message(self, message_id: int) -> dict:
        """Return detailed information about a message.

        Fails with 502 Bad Gateway when the message has type_id == 2."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"mail/{message_id}/?child={self.user['id']}")["data"]

    def get_recipients(self) -> List[dict]:
        """Return the available message recipients."""
        self._check_authorized()
        return self._get("mail/new/")["data"]

    def get_achievements(self) -> dict:
        """Return the list of achievements."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"achievements/?child={self.user['id']}")["data"]

    def get_control_marks(self) -> List[dict]:
        """Return final (term) grades."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"controlmark/?child={self.user['id']}")

    def get_all_marks(self, period: int, subject_id: int) -> dict:
        """Return all grades for a subject over a period. May be empty."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"all_marks/{period}/{subject_id}/?child={self.user['id']}")[
            "data"
        ]

    def get_events(self) -> dict:
        """Return events."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"btm/?child={self.user['id']}")

    def get_certificate(self) -> dict:
        """Return certificate information."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"do/cert/?child={self.user['id']}")["data"]

    def get_birthdays(self) -> List[dict]:
        """Return birthdays."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"birthday/?child={self.user['id']}")["data"]

    def get_food_info(self, _date: Union[str, date, datetime, None] = None) -> dict:
        """Return meal information for a date (defaults to today). May be empty."""
        self._check_authorized()
        self._check_empty()
        if _date is None:
            _date = datetime.now()
        if isinstance(_date, (date, datetime)):
            _date = _date.strftime("%Y-%m-%d")
        return self._get(
            f"food/calendary/?child={self.user['id']}&food_type={self.user['school_is_food']}&selected_date={_date}&food_menu_complex=1"
        )["data"]

    def get_classmates(self) -> List[dict]:
        """Return information about classmates."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"odnoklassniki/?child={self.user['id']}")["data"]

    def get_books(self) -> List[dict]:
        """Return information about borrowed library books."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"book/?child={self.user['id']}")["data"]

    def get_useful_links(self) -> dict:
        """Return useful links."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"ios/?child={self.user['id']}")["data"]

    def get_guide(self) -> dict:
        """Return information about the educational institution."""
        self._check_authorized()
        self._check_empty()
        return self._get(f"guide/?child={self.user['id']}")["data"]

    def get_timetable(
        self, start: Union[str, date, datetime], end: Union[str, date, datetime]
    ) -> List[dict]:
        """Return the whole diary for a date range.

        Date example: '2020-04-27'"""
        self._check_authorized()
        self._check_empty()
        if isinstance(start, (date, datetime)):
            start = start.strftime("%Y-%m-%d")
        if isinstance(end, (date, datetime)):
            end = end.strftime("%Y-%m-%d")
        return self._get(
            f"timetable2/?start={start}&end={end}&child={self.user['id']}"
        )["lessons"]
class AsyncRuobr(Ruobr):
    """Asynchronous client for the Ruobr e-diary API.

    Mirrors :class:`Ruobr`, but performs HTTP requests with an async httpx
    client; all data methods are coroutines.
    """

    async def _check_authorized(self):
        # Lazily authorize on first use.
        if not self.is_authorized:
            await self.get_user()

    async def _get(self, target: str) -> dict:
        """Perform an authenticated GET and decode/validate the JSON response."""
        async with httpx.AsyncClient() as client:
            response = await client.get(
                f"https://api3d.ruobr.ru/{target}",
                headers={"password": self.password, "username": self.username},
            )
        try:
            response = response.json()
        # Fixed: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            raise NoSuccessException(response.text)
        if isinstance(response, dict):  # on error the API returns a dict
            if "success" in response.keys():
                if not (response["success"]):
                    if "error" in response.keys():
                        raise NoSuccessException(response["error"])
                    if "error_type" in response.keys():
                        if response["error_type"] == "auth":
                            raise AuthenticationException(
                                "Проверьте логин и/или пароль"
                            )
                        raise NoSuccessException(response["error_type"])
                    raise NoSuccessException(response)
        return response

    async def get_user(self) -> dict:
        """Authorize and return information about the student.

        After authorization the data is available via the ``user`` property.
        For parent accounts, set ``child`` to select a specific child."""
        if self.user is not None:
            return self.user
        user = await self._get("user/")
        if not user:
            raise AuthenticationException("Проверьте логин и/или пароль")
        if user["status"] == "applicant":
            self.is_applicant = True
            self._children = user["childs"]
        else:
            self.is_applicant = False
            self._children = [user]
        self.is_authorized = True
        self.is_empty = len(self._children) == 0
        return self.user

    async def get_children(self) -> List[dict]:
        """Return the list of children on this account (for parent accounts)."""
        await self._check_authorized()
        return self._children

    async def get_mail(self) -> List[dict]:
        """Return the mailbox.

        If a message has type_id == 2, last_msg_text contains HTML markup."""
        await self._check_authorized()
        result = await self._get("mail/")
        return result["messages"]

    async def get_message(self, message_id: int) -> dict:
        """Return detailed information about a message.

        Fails when the message has type_id == 2."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"mail/{message_id}/?child={self.user['id']}")
        return result["data"]

    async def get_recipients(self) -> List[dict]:
        """Return the available message recipients."""
        # TODO: support sending messages
        await self._check_authorized()
        result = await self._get("mail/new/")
        return result["data"]

    async def get_achievements(self) -> dict:
        """Return the list of achievements."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"achievements/?child={self.user['id']}")
        return result["data"]

    async def get_control_marks(self) -> List[dict]:
        """Return final (term) grades."""
        await self._check_authorized()
        self._check_empty()
        return await self._get(f"controlmark/?child={self.user['id']}")

    async def get_all_marks(self, period: int, subject_id: int) -> dict:
        """Return all grades for a subject over a period. May be empty."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(
            f"all_marks/{period}/{subject_id}/?child={self.user['id']}"
        )
        return result["data"]

    async def get_events(self) -> dict:
        """Return events."""
        await self._check_authorized()
        self._check_empty()
        return await self._get(f"btm/?child={self.user['id']}")

    async def get_certificate(self) -> dict:
        """Return certificate information."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"do/cert/?child={self.user['id']}")
        return result["data"]

    async def get_birthdays(self) -> List[dict]:
        """Return birthdays."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"birthday/?child={self.user['id']}")
        return result["data"]

    async def get_food_info(self, _date: Union[str, date, datetime, None] = None) -> dict:
        """Return meal information for a date (defaults to today). May be empty."""
        await self._check_authorized()
        self._check_empty()
        if _date is None:
            _date = datetime.now()
        if isinstance(_date, (date, datetime)):
            _date = _date.strftime("%Y-%m-%d")
        result = await self._get(
            f"food/calendary/?child={self.user['id']}&food_type={self.user['school_is_food']}&selected_date={_date}&food_menu_complex=1"
        )
        return result["data"]

    async def get_classmates(self) -> List[dict]:
        """Return information about classmates."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"odnoklassniki/?child={self.user['id']}")
        return result["data"]

    async def get_books(self) -> List[dict]:
        """Return information about borrowed library books."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"book/?child={self.user['id']}")
        return result["data"]

    async def get_useful_links(self) -> dict:
        """Return useful links."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"ios/?child={self.user['id']}")
        return result["data"]

    async def get_guide(self) -> dict:
        """Return information about the educational institution."""
        await self._check_authorized()
        self._check_empty()
        result = await self._get(f"guide/?child={self.user['id']}")
        return result["data"]

    async def get_timetable(
        self, start: Union[str, date, datetime], end: Union[str, date, datetime]
    ) -> List[dict]:
        """Return the whole diary for a date range.

        Date example: '2020-04-27'"""
        await self._check_authorized()
        self._check_empty()
        if isinstance(start, (date, datetime)):
            start = start.strftime("%Y-%m-%d")
        if isinstance(end, (date, datetime)):
            end = end.strftime("%Y-%m-%d")
        result = await self._get(
            f"timetable2/?start={start}&end={end}&child={self.user['id']}"
        )
        return result["lessons"]
from dataclasses import dataclass
from itertools import groupby,chain
import inspect
import openpyxl
from openpyxl.styles import NamedStyle, PatternFill, Font, Alignment
from openpyxl.utils import get_column_letter
from datetime import datetime
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from .marks import Marks
from .exceptions import DateNotFoundError
def string_year(month_num: int, year: int) -> int:
    """Return the calendar year that a school-year month falls in.

    The school year starts in September, so months 9-12 belong to *year*
    and months 1-8 belong to the following calendar year.

    Fixed annotations: the original declared str arguments and a str return,
    but callers pass ints and the function returns an int (`year + 1` would
    fail on a str anyway).
    """
    if 9 <= month_num <= 12:
        return year
    return year + 1
def get_student_year(__year: str) -> list:
    """Return the ten months of the school year starting September of *__year*.

    Each entry is formatted 'MM.YYYY', e.g. get_student_year('2020') yields
    ['09.2020', '10.2020', ..., '06.2021'].

    The month arithmetic is computed directly instead of via
    dateutil.relativedelta, removing a third-party dependency from this
    pure-date computation while producing identical strings.
    """
    start_year = int(__year)
    months = []
    for offset in range(10):
        # Months elapsed since January of start_year; September is index 8.
        total = 8 + offset
        months.append(f"{total % 12 + 1:02d}.{start_year + total // 12}")
    return months
@dataclass
class _modify_journal_object:
"""
Модификцаия объекта Journal (Отсутствует параметр Subject)
"""
mark : int
date : datetime
def __repr__(self) -> str:
return f"{self.mark}"
@dataclass
class _subject_object:
    """All marks for a single subject within one month."""

    subject: str
    marks: list[_modify_journal_object]

    def __repr__(self) -> str:
        return f"{self.subject}{self.marks}"

    def average(self) -> float | int:
        """Mean of every digit mark, rounded to 2 places, as a trimmed string.

        Each entry's ``mark`` may hold several digit characters; they are
        flattened and averaged individually.
        """
        flat = [char for entry in self.marks for char in entry.mark]
        values = [float(char) for char in flat if char.isdigit()]
        mean = round(sum(values) / len(values), 2)
        return f"{mean:g}"

    def marks_row(self, days_quantity: int) -> list[str]:
        """Marks aligned to day numbers 1..days_quantity ('' where none)."""
        day_numbers = [int(datetime.strftime(entry.date, '%d')) for entry in self.marks]
        row = []
        for day in range(1, days_quantity + 1):
            if day in day_numbers:
                # First matching entry wins, as in list.index().
                row.append(self.marks[day_numbers.index(day)].mark)
            else:
                row.append('')
        return row
@dataclass
class _month_object:
    """One month's worth of data: month number, day count, subjects and all
    the marks received during that month.
    """

    def __repr__(self) -> str:
        return f"{self.name().capitalize()} {self.data}"

    number: int
    quantity: int
    subjects_list: list
    data: list[_subject_object]

    def days_list(self) -> list[str]:
        """Day numbers 1..quantity, padded with ' ' up to 31 entries."""
        days = list(range(1, self.quantity + 1))
        days.extend(' ' for _ in range(31 - len(days)))
        return days

    def name(self) -> str:
        """Russian month name in lower case."""
        month_names = ['январь', 'февраль', 'март', 'апрель', 'май', 'июнь', 'июль', 'август', 'сентябрь',
                       'октябрь', 'ноябрь', 'декабрь']
        return month_names[self.number - 1]
class _styles_object:
    """Registers the workbook's named cell styles.

    Each style method both registers an openpyxl NamedStyle on the workbook
    and exposes the style *name* as a class attribute of the same name, so
    callers can write e.g. ``self.style.bad``.
    """

    def __init__(self, workbook : openpyxl.Workbook):
        self.wb = workbook
        # Automatically invoke every style method on initialisation.
        # method_list = [method for method in dir(self.__class__) if method.startswith('__') is False]
        method_list = [m for m in self.__class__.__dict__.values() if inspect.isfunction(m) and m != __class__.__init__]
        for method in method_list:
            method(self)

    def neutral(self) -> None:
        # Yellow fill for "average" marks (3s).
        ns = NamedStyle(name='neutral')
        ns.fill = PatternFill(fgColor='f3da0b', fill_type='solid')
        ns.font = Font(color='1f0800', name='Bahnscrift', bold=False)
        ns.alignment = Alignment(horizontal='center', vertical='center')
        self.wb.add_named_style(ns)
        # Expose the style name as a class attribute (e.g. _styles_object.neutral == 'neutral').
        setattr(self.__class__, ns.name , ns.name)

    def bad(self) -> None:
        # Red fill for failing marks (2s).
        ns = NamedStyle(name='bad')
        ns.fill = PatternFill(fgColor='ff8e7a', fill_type='solid')
        ns.font = Font(color='1f0800', name='Bahnscrift', bold=True)
        ns.alignment = Alignment(horizontal='center', vertical='center')
        self.wb.add_named_style(ns)
        setattr(self.__class__, ns.name , ns.name)
class ExcelTable:
    """Builds an .xlsx grade sheet for one school year from a Marks store."""

    def __init__(self, marks : Marks, year : str | int) -> None:
        self.MARKS = marks.get_all()
        self.workbook = openpyxl.Workbook()
        self.sheet = self.workbook['Sheet']
        self.style = _styles_object(self.workbook)
        self.year = year

    def __month_generator(self):
        """Yield one _month_object per month of the selected school year."""
        # Crop the marks to the selected school year (Sep 20XX - Jun 20XX+1).
        croped_marks : list = [i for i in self.MARKS if i.date.strftime('%m.%Y') in get_student_year(self.year)]
        if not croped_marks:
            raise DateNotFoundError(f"Could not find any data for {self.year}")
        months : list = groupby(croped_marks, key = lambda x : x.date.strftime('%m'))
        for month, data in months:
            # Sort records by subject (groupby below requires sorted input).
            data = list(data)
            data.sort(key = lambda x : x.subject)
            # Number of days in this month.
            daysPM : int = monthrange(year = int(self.year),month=int(month))[1]
            # Group records by subject.
            lessonsDataPM = groupby(data, key=lambda y : y.subject)
            # Replace groupby objects with concrete containers.
            lessonsModifyDataPM = []
            for subject_name , journals in lessonsDataPM:
                journals = list(journals)
                journals = list(map(lambda x : _modify_journal_object(mark=x.mark,date=x.date),journals))
                lessonsModifyDataPM.append(_subject_object(subject = subject_name,
                                                           marks = list(journals)
                                                           ))
            # List of subjects in the month.
            # NOTE(review): lessonsDataPM was already exhausted by the loop
            # above, so this list is always empty — confirm whether
            # subjects_list is actually used anywhere.
            lessonsListPM : list = [i.subject for i in lessonsDataPM]
            MonthObject : _month_object = _month_object(number = int(month),
                                                        data = lessonsModifyDataPM,
                                                        subjects_list= lessonsListPM,
                                                        quantity=daysPM)
            yield MonthObject

    def __recolor_bad_marks(self, bad_mark_indices : list[int], style : str) -> None :
        """Apply *style* to the given column indices of the last appended row."""
        for i in bad_mark_indices:
            self.sheet.cell(row = self.sheet.max_row,column=i).style = style

    def __fill(self) -> None:
        """Append one header row per month plus one row per subject."""
        for month_object in self.__month_generator():
            header = [f"{month_object.name().capitalize()} {string_year(month_object.number,self.year)}"] + month_object.days_list() + ["Средний балл"]
            self.sheet.append(header)
            # Different font for the month caption cell.
            self.sheet.cell(row = self.sheet.max_row, column=1).font = "Bahnschrift SemiBold"
            self.sheet.cell(row = self.sheet.max_row, column=1).alignment = Alignment(horizontal='center', vertical='center',wrap_text=True)
            for subject_object in month_object.data:
                body = [subject_object.subject] + subject_object.marks_row(31) + [subject_object.average()]
                self.sheet.append(body)
                # Different font and alignment for the subject-name cell.
                self.sheet.cell(row = self.sheet.max_row, column=1).font = "Bahnschrift SemiLight"
                # Highlight failing marks (containing '2').
                two_indices = [i+1 for i, x in enumerate(body[:-1]) if '2' in x]
                self.__recolor_bad_marks(two_indices,self.style.bad)
                # Highlight average marks (containing '3').
                three_indices = [i+1 for i, x in enumerate(body[:-1]) if '3' in x]
                self.__recolor_bad_marks(three_indices,self.style.neutral)

    def __post_processing(self) -> None:
        """Set column widths, fonts and the sheet title after filling."""
        # Resize the day columns.
        for i in range(2, self.sheet.max_column+1):
            self.sheet.column_dimensions[f'{get_column_letter(i)}'].width = 5
        self.sheet.column_dimensions['AG'].width = 10
        self.sheet.column_dimensions['A'].width = 75
        # Apply fonts/alignment to the mark cells.
        for row in self.sheet[f'B1:AO{self.sheet.max_row}']:
            for cell in row:
                cell.font = "Bahnschrift SemiLight"
                cell.alignment = Alignment(horizontal='center', vertical='center',wrap_text=True)
        # Rename the sheet.
        # NOTE(review): `self.year + 1` requires self.year to be an int even
        # though the constructor annotation allows str — confirm callers.
        self.sheet.title = f'Оценки {self.year}-{self.year + 1}'

    def save(self, __file : str | bytes) -> None:
        """Fill the sheet, post-process it and write the workbook to *__file*."""
        # Fill the sheet with data.
        self.__fill()
        # Post-processing.
        self.__post_processing()
        self.workbook.save(__file)
import logging
import random
import sys
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import torch
import torch.optim as opt
from tqdm.auto import trange
from transformers.models.t5.configuration_t5 import T5Config
from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
from transformers.models.t5.tokenization_t5 import T5Tokenizer
from utils.data_preparation import make_pairs, train_val_split
from utils.read_config import HParams, get_hparams
from utils.read_jsonl_data import read_jsonl_data
# Presumably disables oneDNN custom ops for numeric reproducibility — confirm.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Fix every RNG seed (Python, NumPy, Torch CPU and CUDA) for reproducible runs.
SEED = 12345
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
def main(hpararms: HParams):
    """End-to-end training entry point: load datasets, split, build dialog
    pairs, fine-tune the T5 model and save the best checkpoint.

    :param hpararms: parsed hyperparameters (NOTE(review): the name carries a
        typo for "hparams" used consistently across this module).
    """
    logger = get_logger(hpararms.experiment_path)
    logger.info(hpararms)
    logger.info("Datasets loading...")
    datasets = [read_jsonl_data(Path(path)) for path in hpararms.data.datasets]
    logger.info("Datasets spliting...")
    data_train: List[Dict[str, List[Dict[str, Any]]]] = list()
    data_val: List[Dict[str, List[Dict[str, Any]]]] = list()
    for dataset in datasets:
        # Split each dataset separately, then pool the splits.
        dataset_train, dataset_val = train_val_split(
            dataset, hpararms.data.val_size, True
        )
        data_train.extend(dataset_train)
        data_val.extend(dataset_val)
    logger.info(f"Train dialogs: {len(data_train)} Val dialogs: {len(data_val)}")
    tokenizer, model, optimizer = get_model(hpararms, logger)
    logger.info("Datasets preparation...")
    train_df = make_pairs(
        data_train,
        tokenizer,
        hpararms.train.max_history_tokens,
        hpararms.train.max_history_messages,
    )
    val_df = make_pairs(
        data_val,
        tokenizer,
        hpararms.train.max_history_tokens,
        hpararms.train.max_history_messages,
    )
    # Column 0: history joined with the T5 "</s>" separator; column 1: reply.
    train_df = pd.DataFrame([("</s>".join(p[0]), p[1]) for p in train_df])
    val_df = pd.DataFrame([("</s>".join(p[0]), p[1]) for p in val_df])
    # Drop duplicate train pairs, then drop train rows whose history AND
    # reply each appear somewhere in validation (leakage guard; note this is
    # elementwise membership, not exact pair matching).
    train_df = train_df[~train_df.duplicated()]
    train_df = train_df[~(train_df[0].isin(val_df[0]) & train_df[1].isin(val_df[1]))]
    val_df = val_df.drop_duplicates()
    logger.info(f"Train pairs: {len(train_df)} Val pairs: {len(val_df)}")
    best_model = train(
        train_df,
        val_df,
        hpararms.train.batch_size,
        hpararms.train.report_steps,
        hpararms.train.save_steps,
        hpararms.train.epochs,
        tokenizer,
        model,
        optimizer,
        logger,
    )
    save_model(best_model, Path(hpararms.experiment_path) / "checkpoints/best_model/")
def get_logger(model_dir, filename="train.log"):
    """Create a DEBUG-level logger named after *model_dir* that logs to stdout
    and to a tab-separated log file inside the directory.

    The directory is created if missing; records go to ``model_dir/filename``.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    file_handler = logging.FileHandler(os.path.join(model_dir, filename))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
    )
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    return logger
def get_model(
    hparams: HParams, logger: logging.Logger
) -> Tuple[T5Tokenizer, T5ForConditionalGeneration, opt.Optimizer]:
    """Load the tokenizer, the T5 model (pretrained or fresh from config) and
    an AdamW optimizer.

    At least one of ``hparams.model.t5_config`` / ``hparams.model.huggingface_path``
    must be set; a pretrained path takes precedence over a fresh config.
    (Return annotation corrected: the function returns a 3-tuple including
    the optimizer, not a 2-tuple.)
    """
    logger.info("Model loading...")
    t5_config = getattr(hparams.model, "t5_config", None)
    huggingface_path = getattr(hparams.model, "huggingface_path", None)
    assert t5_config or huggingface_path
    tokenizer = T5Tokenizer.from_pretrained(hparams.tokenizer.huggingface_path)
    if huggingface_path:
        model = T5ForConditionalGeneration.from_pretrained(
            hparams.model.huggingface_path
        )
    elif t5_config:
        t5_config = T5Config(**hparams.model.t5_config)
        model = T5ForConditionalGeneration(t5_config)
    # NOTE: requires a CUDA-capable GPU.
    model = model.cuda()
    optimizer = torch.optim.AdamW(
        model.parameters(), lr=hparams.train.lr, weight_decay=hparams.train.weight_decay
    )
    return tokenizer, model, optimizer
def train(
    pairs_train: pd.DataFrame,
    pairs_val: pd.DataFrame,
    batch_size: int,
    report_steps: int,
    save_steps: int,
    epochs: int,
    tokenizer: T5Tokenizer,
    model: T5ForConditionalGeneration,
    optimizer: opt.Optimizer,
    logger: logging.Logger,
) -> T5ForConditionalGeneration:
    """Fine-tune *model* on (history, reply) pairs, periodically evaluating
    and checkpointing; return the model with the best validation loss.

    NOTE(review): ``best_model = model`` is an alias, not a weight snapshot,
    so the returned "best" model actually carries the latest weights —
    confirm whether a deepcopy/state_dict snapshot was intended.
    """
    logger.info("Training starts")
    model.train()
    losses = []
    best_model = model
    best_loss = 1000000
    for epoch in range(epochs):
        logger.info(f"EPOCH {epoch}")
        # Reshuffle the training pairs every epoch.
        pairs_train = pairs_train.sample(frac=1)
        for i in trange(0, int(len(pairs_train) / batch_size), leave=False, position=0):
            batch = pairs_train.values[i * batch_size:(i + 1) * batch_size]
            x = tokenizer([p[0] for p in batch], return_tensors="pt", padding="longest")
            x = {k: v.to(model.device, non_blocking=True) for k, v in x.items()}
            # x = x.to(model.device, non_blocking=True)
            y = tokenizer([p[1] for p in batch], return_tensors="pt", padding="longest")
            y = {k: v.to(model.device, non_blocking=True) for k, v in y.items()}
            # y = y.to(model.device, non_blocking=True)
            # -100 is the special label value that excludes (padding) tokens
            # from the loss computation.
            # y.input_ids[y.input_ids == 0] = -100
            y["input_ids"][y["input_ids"] == 0] = -100
            loss: torch.Tensor = model(
                input_ids=x["input_ids"],
                attention_mask=x["attention_mask"],
                labels=y["input_ids"],
                decoder_attention_mask=y["attention_mask"],
                return_dict=True,
            ).loss
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            losses.append(loss.item())
            if i % report_steps == 0:
                val_loss = np.round(
                    eval(pairs_val, batch_size, tokenizer, model, logger), 3
                )
                train_loss = np.round(np.mean(losses[-report_steps:]), 3)
                step = epoch * int(len(pairs_train) / batch_size) + i
                logger.info(
                    f"step {step} | train loss {train_loss} | val loss {val_loss}"
                )
                if val_loss < best_loss:
                    best_model = model
                    best_loss = val_loss
                # eval() switched the model to eval mode; switch back.
                model.train()
            if i % save_steps == 0:
                step = epoch * int(len(pairs_train) / batch_size) + i
                # NOTE(review): `hpararms` is a module-level global defined
                # only under the __main__ guard — this NameErrors when train()
                # is called from an import; consider passing the path in.
                save_model(
                    model, Path(hpararms.experiment_path) / f"checkpoints/{step}_steps/"
                )
    return best_model
@torch.no_grad()
def eval(
    pairs: pd.DataFrame,
    batch_size: int,
    tokenizer: T5Tokenizer,
    model: T5ForConditionalGeneration,
    logger: logging.Logger,
) -> float:
    """Compute the mean validation loss over *pairs* (no gradients).

    Leaves the model in eval mode; the caller is responsible for switching
    back to train mode. NOTE(review): this shadows the builtin ``eval`` —
    renaming would require updating the call site in train().
    """
    eval_losses = list()
    model.eval()
    pairs = pairs.sample(frac=1)
    for i in trange(0, int(len(pairs) / batch_size), leave=False, position=1):
        batch = pairs.values[i * batch_size:(i + 1) * batch_size]
        x = tokenizer([p[0] for p in batch], return_tensors="pt", padding="longest")
        x = {k: v.to(model.device, non_blocking=True) for k, v in x.items()}
        # x = x.to(model.device)
        y = tokenizer([p[1] for p in batch], return_tensors="pt", padding="longest")
        y = {k: v.to(model.device, non_blocking=True) for k, v in y.items()}
        # y = y.to(model.device)
        # -100 is the special label value that excludes padding from the loss.
        # y.input_ids[y.input_ids == 0] = -100
        y["input_ids"][y["input_ids"] == 0] = -100
        loss: torch.Tensor = model(
            input_ids=x["input_ids"],
            attention_mask=x["attention_mask"],
            labels=y["input_ids"],
            decoder_attention_mask=y["attention_mask"],
            return_dict=True,
        ).loss
        eval_losses.append(loss.item())
    return np.mean(eval_losses)
def save_model(model: T5ForConditionalGeneration, save_path: Path):
    """Persist `model` weights to `save_path`, creating parent dirs first."""
    target_dir = save_path
    target_dir.mkdir(parents=True, exist_ok=True)
    model.save_pretrained(target_dir)
if __name__ == "__main__":
    # NOTE: the misspelled name `hpararms` must keep this exact spelling —
    # the training loop above reads it as a module-level global when building
    # its checkpoint save path.
    hpararms = get_hparams()
    main(hpararms) | /rupersonaagent-0.1.0-py3-none-any.whl/knowledge_distillation/train.py | 0.558568 | 0.264441 | train.py | pypi
import argparse
import os
from typing import Tuple
import torch
from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
from transformers.models.t5.tokenization_t5 import T5Tokenizer
# Disable TensorFlow's oneDNN custom ops via env var (must be set before TF
# is imported); presumably to keep numerics/log output consistent — confirm.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
def get_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the interactive chat script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--tokenizer-pretrained-path",
        default="cointegrated/rut5-small-chitchat",
    )
    parser.add_argument(
        "--model-pretrained-path",
        default="experiments/exp2-t5-small-chitchat-finetuning/checkpoints/2000_steps",
    )
    parser.add_argument(
        "--device",
        default="cuda:0",
    )
    parser.add_argument(
        "--max-history-messages",
        # Fix: without type=int a CLI-supplied value arrived as `str` and
        # broke list slicing in main(); the default was already an int.
        type=int,
        default=4,
    )
    return parser
def main(
    tokenizer_pretrained_path: str,
    model_pretrained_path: str,
    device: str,
    max_history_messages: int,
):
    """Run a console chat loop until the user types a quit word."""
    print("Model loading...")
    tokenizer, model = get_model(tokenizer_pretrained_path, model_pretrained_path)
    model = model.eval().to(device)
    print("Bot ready!")
    quit_words = {"q", "quit", "пока"}
    history = []
    while True:
        user_text = input("User> ").strip()
        history.append(user_text)
        if user_text.lower() in quit_words:
            break
        # Join the last few turns with the T5 separator as generation context.
        context = "</s>".join(history[-max_history_messages:])
        reply = generate_answer(context, tokenizer, model)
        print(f" Bot> {reply}")
        history.append(reply)
def get_model(
    tokenizer_pretrained_path: str, model_pretrained_path: str
) -> Tuple[T5Tokenizer, T5ForConditionalGeneration]:
    """Load the tokenizer and the seq2seq model from their checkpoints."""
    return (
        T5Tokenizer.from_pretrained(tokenizer_pretrained_path),
        T5ForConditionalGeneration.from_pretrained(model_pretrained_path),
    )
@torch.no_grad()
def generate_answer(
    history_text: str, tokenizer: T5Tokenizer, model: T5ForConditionalGeneration
) -> str:
    """Sample a single reply for the given dialog history string."""
    encoded = tokenizer(history_text, return_tensors="pt")
    on_device = {name: tensor.to(model.device) for name, tensor in encoded.items()}
    generated = model.generate(
        **on_device,
        do_sample=True,
        top_p=0.5,
        num_return_sequences=1,
        repetition_penalty=4.5,
        max_length=1024,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
if __name__ == "__main__":
    # CLI entry point: parse arguments, then start the interactive chat loop.
    parser = get_parser()
    args = parser.parse_args()
    main(
        args.tokenizer_pretrained_path,
        args.model_pretrained_path,
        args.device,
        args.max_history_messages,
    ) | /rupersonaagent-0.1.0-py3-none-any.whl/knowledge_distillation/chat.py | 0.731922 | 0.219526 | chat.py | pypi
```
from typing import List, Dict, Tuple, Any
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
import json
from transformers import T5ForConditionalGeneration, T5Tokenizer
from pathlib import Path
from utils.read_jsonl_data import read_jsonl_data
from tqdm.auto import tqdm
# Reproducibility: fixed seed for the shuffling/splitting below.
SEED = 12345
# Fraction of each dataset held out for validation.
val_size = 0.05
random.seed(SEED)
# Train data
TRAIN_DATA_DIR = Path("data/Толока Персона Чат")
gk_1_500_path = TRAIN_DATA_DIR / "TolokaPersonaChat_gk_1_500.jsonl"
gk_test_1_500_path = TRAIN_DATA_DIR / "TolokaPersonaChat_1_500_gk_test.jsonl"
test_stipa_path = TRAIN_DATA_DIR / "gk(test)Stipa.jsonl"
genderized_gk_test_v2_path = TRAIN_DATA_DIR / "TolokaPersonaChat_genderized_gk(test)v2.jsonl"
# Test data
TEST_DATA_DIR = Path("data/test")
all_dialogs_path = TEST_DATA_DIR / "all_dialogs.jsonl"
# Base checkpoints downloaded from the HF hub.
tokenizer = T5Tokenizer.from_pretrained("cointegrated/rut5-small-chitchat")
model = T5ForConditionalGeneration.from_pretrained("cointegrated/rut5-small-chitchat")
def train_val_split(
    data: List[Dict[str, str]],
    val_size: float = 0.05,
    shuffle: bool = True,
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
    """Split `data` into (train, validation) lists.

    `val_size` is the validation fraction.  When `shuffle` is True the input
    list is shuffled IN PLACE first (uses the module-seeded `random` RNG).

    Fix: the original returned `data[:-val_len]`, which yields an EMPTY train
    split whenever `val_len` computes to 0 (fewer than 1/val_size items),
    because `data[:-0] == []`.  Splitting at an explicit index avoids that.
    """
    if shuffle:
        random.shuffle(data)
    val_len = int(val_size * len(data))
    split_at = len(data) - val_len
    return data[:split_at], data[split_at:]
# Load each raw JSONL dataset from disk.
gk_1_500_data = read_jsonl_data(gk_1_500_path)
gk_test_1_500_data = read_jsonl_data(gk_test_1_500_path)
test_stipa_data = read_jsonl_data(test_stipa_path)
genderized_gk_test_v2_data = read_jsonl_data(genderized_gk_test_v2_path)
# Split each dataset separately so every source is represented in both splits.
gk_1_500_data_train, gk_1_500_data_val = train_val_split(gk_1_500_data, val_size, True)
gk_test_1_500_data_train, gk_test_1_500_data_val = train_val_split(gk_test_1_500_data, val_size, True)
test_stipa_data_train, test_stipa_data_val = train_val_split(test_stipa_data, val_size, True)
genderized_gk_test_v2_data_train, genderized_gk_test_v2_data_val = train_val_split(genderized_gk_test_v2_data, val_size, True)
data_train = gk_1_500_data_train + gk_test_1_500_data_train + test_stipa_data_train + genderized_gk_test_v2_data_train
data_val = gk_1_500_data_val + gk_test_1_500_data_val + test_stipa_data_val + genderized_gk_test_v2_data_val
# Notebook cell output: sizes of the combined splits.
len(data_train), len(data_val)
def process_text(text: str) -> str:
    """Normalize an utterance; currently only trims surrounding whitespace."""
    return text.strip()
def compress_consecutive_statements(dialog: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Merge consecutive messages from the same speaker into one message.

    Each message is a dict with "person" (speaker id) and "text"; merged
    texts are joined with a single space, preserving order.

    Fix: the original indexed `dialog[0]` unconditionally and crashed with
    IndexError on an empty dialog; an empty input now returns [].
    """
    if not dialog:
        return []
    compressed: List[Dict[str, Any]] = []
    current_person = dialog[0]["person"]
    buffered_texts = [dialog[0]["text"]]
    for message in dialog[1:]:
        if message["person"] == current_person:
            buffered_texts.append(message["text"])
        else:
            # Speaker changed: flush the buffered run, start a new one.
            compressed.append({"person": current_person, "text": " ".join(buffered_texts)})
            current_person = message["person"]
            buffered_texts = [message["text"]]
    # Flush the final run.
    compressed.append({"person": current_person, "text": " ".join(buffered_texts)})
    return compressed
def make_pairs(
    data: List[Dict[str, List[Dict[str, Any]]]],
    tokenizer: T5Tokenizer,
    max_history_tokens: int,
    max_history_messages: int = 3,
    # max_target_tokens: int,
) -> List[Tuple[str, str]]:
    """Build (dialog-history, next-reply) training pairs from raw dialogs.

    For every reply, one pair is emitted per history window length from 1 up
    to `max_history_messages`; running histories longer than
    `max_history_tokens` tokens are trimmed from the front.
    """
    # All "dialog history -> reply" pairs across every dialog.
    pairs: List[Tuple[str, str]] = list()
    for data_item in tqdm(data):
        # "dialog history -> reply" pairs within one dialog.
        dialog_pairs: List[Tuple[List[str], str]] = list()
        # Merge consecutive messages from the same speaker.
        dialog = compress_consecutive_statements(data_item['dialog'])
        historical_text = [dialog[0]['text']]
        for message in dialog[1:]:
            text = message['text']
            # Emit one pair per available history window length.
            for history_messages_len in range(1, max_history_messages+1):
                if len(historical_text) >= history_messages_len:
                    dialog_pairs.append((historical_text[-history_messages_len:], text))
            offset = 0
            historical_text = dialog_pairs[-1][0][offset:] + [text]
            # historical_text = "</s>".join(historical_text)
            # Drop the oldest messages until the joined history fits the
            # token budget.
            while len(tokenizer("</s>".join(historical_text)).input_ids) > max_history_tokens:
                offset += 1
                historical_text = dialog_pairs[-1][0][offset:] + [text]
                # historical_text = "</s>".join(historical_text)
        pairs.extend(dialog_pairs)
    return pairs
pairs_train = make_pairs(data_train, tokenizer, 512, 4)
pairs_val = make_pairs(data_val, tokenizer, 512, 4)
print(len(pairs_train), len(pairs_val))
import pandas as pd
# Join history messages with the T5 "</s>" separator; col 0 = history,
# col 1 = target reply.
pairs_train = pd.DataFrame([("</s>".join(p[0]), p[1]) for p in pairs_train]) # .drop_duplicates()
pairs_val = pd.DataFrame([("</s>".join(p[0]), p[1]) for p in pairs_val]) # .drop_duplicates()
# Deduplicate, and drop train rows whose history AND reply also occur in val.
pairs_train = pairs_train[~pairs_train.duplicated()]
pairs_train = pairs_train[~(pairs_train[0].isin(pairs_val[0]) & pairs_train[1].isin(pairs_val[1]))]
pairs_val = pairs_val.drop_duplicates()
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
# raw_model = 'cointegrated/rut5-base-multitask'
# model = T5ForConditionalGeneration.from_pretrained(raw_model).cuda();
# tokenizer = T5Tokenizer.from_pretrained(raw_model)
device = "cuda:0"
raw_model = "cointegrated/rut5-small-chitchat"
tokenizer = T5Tokenizer.from_pretrained(raw_model)
model = T5ForConditionalGeneration.from_pretrained(raw_model).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5, weight_decay=0.1)
# optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=0.2)
from tqdm.auto import trange
import random
import numpy as np
batch_size = 24 # examples shown to the model per optimization step
report_steps = 200 # how often (in steps) we print/evaluate
epochs = 15 # number of passes over the training data
@torch.no_grad()
def eval(pairs, tokenizer, model) -> float:
    """Average cross-entropy loss of `model` over shuffled `pairs`.

    Uses the module-level `batch_size`; a trailing partial batch is skipped.
    Leaves the model in eval mode.
    """
    model.eval()
    shuffled = pairs.sample(frac=1)
    batch_losses = []
    for start in range(0, int(len(shuffled) / batch_size)):
        rows = shuffled.values[start * batch_size: (start + 1) * batch_size]
        # Encode context and target reply.
        x = tokenizer([r[0] for r in rows], return_tensors='pt', padding="longest").to(model.device)
        y = tokenizer([r[1] for r in rows], return_tensors='pt', padding="longest").to(model.device)
        # -100 marks labels the loss should ignore (padding).
        y.input_ids[y.input_ids == 0] = -100
        outputs = model(
            input_ids=x.input_ids,
            attention_mask=x.attention_mask,
            labels=y.input_ids,
            decoder_attention_mask=y.attention_mask,
            return_dict=True
        )
        batch_losses.append(outputs.loss.item())
    return np.mean(batch_losses)
model.train()
losses = []
best_model = None
best_loss = 1000000
for epoch in range(epochs):
    print('EPOCH', epoch)
    # Reshuffle training pairs each epoch.
    pairs_train = pairs_train.sample(frac=1)
    for i in trange(0, int(len(pairs_train) / batch_size)):
        batch = pairs_train.values[i * batch_size: (i + 1) * batch_size]
        # Encode context (x) and target reply (y).
        x = tokenizer([p[0] for p in batch], return_tensors='pt', padding="longest").to(model.device)
        y = tokenizer([p[1] for p in batch], return_tensors='pt', padding="longest").to(model.device)
        # -100 is the label value the loss ignores (masks padding).
        y.input_ids[y.input_ids == 0] = -100
        # Compute the loss.
        loss = model(
            input_ids=x.input_ids,
            attention_mask=x.attention_mask,
            labels=y.input_ids,
            decoder_attention_mask=y.attention_mask,
            return_dict=True
        ).loss
        # Gradient-descent step.
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Track running loss for the moving-average report.
        losses.append(loss.item())
        if i % report_steps == 0:
            val_loss = eval(pairs_val, tokenizer, model)
            print('step', i, '| train loss', np.round(np.mean(losses[-report_steps:]), 3), '| val loss', np.round(val_loss, 3))
            if val_loss < best_loss:
                # NOTE(review): `best_model = model` stores a reference, not a
                # copy — "best_model" keeps tracking the live model, so the
                # best-model save below presumably equals the final state.
                best_model = model
                best_loss = val_loss
            model.train()
    # Per-epoch checkpoint named by the global step reached.
    step = epoch * int(len(pairs_train) / batch_size) + i
    save_dir = Path("experiments/exp1-t5-small-chitchat-finetuning") / f"checkpoints/{step}_steps/"
    model.save_pretrained(save_dir)
save_dir = Path("experiments/exp1-t5-small-chitchat-finetuning") / "checkpoints/best_model"
best_model.save_pretrained(save_dir)
# Notebook cell output: best validation loss reached.
best_loss
import time
@torch.no_grad()
def answer(history_text: str, model) -> str:
    """Sample one reply from `model` for the joined history string.

    Uses the module-level `tokenizer`.
    """
    model.eval()
    encoded = tokenizer(history_text, return_tensors='pt')
    on_device = {k: v.to(model.device) for k, v in encoded.items()}
    generated = model.generate(
        **on_device,
        do_sample=True,
        top_p=0.5,
        num_return_sequences=1,
        repetition_penalty=1.5,
        max_length=1024,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Smoke test: let the fine-tuned model talk to itself for 10 turns,
# keeping only the last 3 messages as context.
history_text = ["Привет!"]
print("bot1:", history_text[0])
for idx in range(10):
    history_tmp = "</s>".join(history_text[-3:])
    text = answer(history_tmp, best_model).replace("<pad>", "").strip()
    print(f"bot{idx % 2}:", text)
    history_text.append(text)
    time.sleep(0.7)
```
| /rupersonaagent-0.1.0-py3-none-any.whl/knowledge_distillation/train.ipynb | 0.560012 | 0.326271 | train.ipynb | pypi |
import argparse
import os
import time
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import torch
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from tqdm.auto import tqdm, trange
from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
from transformers.models.t5.tokenization_t5 import T5Tokenizer
from utils.data_preparation import make_pairs
from utils.read_jsonl_data import read_jsonl_data
# Disable TensorFlow's oneDNN custom ops via env var (must be set before TF
# is imported); presumably to keep numerics/log output consistent — confirm.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
def get_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the BLEU/latency evaluation script.

    Numeric options carry type=int so CLI-supplied values match the int
    defaults (originally they arrived as `str` and broke trange()/slicing).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--datasets-path",
        default="data/test",
    )
    parser.add_argument(
        "--tokenizer-pretrained-path",
        default="cointegrated/rut5-small-chitchat",
    )
    parser.add_argument(
        "--model-pretrained-path",
        default="experiments/exp2-t5-small-chitchat-finetuning/checkpoints/2000_steps",
    )
    parser.add_argument(
        "--device",
        default="cuda:0",
    )
    parser.add_argument(
        "--max-history-tokens",
        type=int,
        default=1024,
    )
    parser.add_argument(
        "--max-history-messages",
        type=int,
        default=4,
    )
    parser.add_argument(
        "--repeats-count",
        type=int,
        default=3,
    )
    return parser
def main(
    datasets_path: List[str],
    tokenizer_pretrained_path: str,
    model_pretrained_path: str,
    device: str,
    max_history_tokens: int,
    max_history_messages: int,
    repeats_count: int,
):
    """Measure generation quality (BLEU-4) and latency of the model.

    Loads every JSONL dataset under `datasets_path`, builds (history, reply)
    pairs, then repeats generation `repeats_count` times, reporting per-run
    and total BLEU plus mean generation time.
    """
    # NOTE(review): `datasets_path` is used as a single directory path, so
    # the `List[str]` annotation looks wrong — presumably should be `str`.
    datasets = [read_jsonl_data(path) for path in Path(datasets_path).iterdir()]
    data: List[Dict[str, List[Dict[str, Any]]]] = list()
    for dataset in datasets:
        data.extend(dataset)
    tokenizer, model = get_model(tokenizer_pretrained_path, model_pretrained_path)
    model = model.eval().to(device)
    df = make_pairs(data, tokenizer, max_history_tokens, max_history_messages)
    df = pd.DataFrame([("</s>".join(p[0]), p[1]) for p in df])
    df = df[~df.duplicated()]
    generation_times = list()
    total_BLEUs = list()
    for generation_idx in trange(repeats_count, position=0, leave=False):
        BLEUs = list()
        # Group rows sharing the same message history so all their target
        # replies serve as references for one generated answer.
        for df_tmp in tqdm(df.groupby(by=0), position=1, leave=False):
            _, df_tmp = df_tmp
            references = [
                tokenizer.tokenize(reference) for reference in df_tmp[1].to_list()
            ]
            for history in df_tmp[0]:
                # Generate a reply and time the call.
                start_time = time.time()
                answer = generate_answer(history, tokenizer, model)
                generation_times.append(time.time() - start_time)
                answer = tokenizer.tokenize(answer)
                bleu = sentence_bleu(
                    references, answer, smoothing_function=SmoothingFunction().method1
                )
                BLEUs.append(bleu)
        mean, std = np.round(np.mean(BLEUs), 4), np.round(np.std(BLEUs), 4)
        print()
        # NOTE(review): "BLUE-4" in the report strings is presumably a typo
        # for "BLEU-4".
        print(f"BLUE-4 №{generation_idx}: {mean}±{std}")
        total_BLEUs.extend(BLEUs)
    mean, std = np.round(np.mean(total_BLEUs), 4), np.round(np.std(total_BLEUs), 4)
    print(f"BLUE-4 total: {mean}±{std}")
    mean = np.round(np.mean(generation_times), 4)
    std = np.round(np.std(generation_times), 4)
    print(f"Average generation time: {mean}±{std}")
def get_model(
    tokenizer_pretrained_path: str, model_pretrained_path: str
) -> Tuple[T5Tokenizer, T5ForConditionalGeneration]:
    """Load tokenizer and model checkpoints for evaluation."""
    loaded_tokenizer = T5Tokenizer.from_pretrained(tokenizer_pretrained_path)
    loaded_model = T5ForConditionalGeneration.from_pretrained(model_pretrained_path)
    return loaded_tokenizer, loaded_model
@torch.no_grad()
def generate_answer(
    history_text: str, tokenizer: T5Tokenizer, model: T5ForConditionalGeneration
) -> str:
    """Sample one reply from `model` for the given history string."""
    batch = tokenizer(history_text, return_tensors="pt")
    batch = {key: value.to(model.device) for key, value in batch.items()}
    hypothesis_ids = model.generate(
        **batch,
        do_sample=True,
        top_p=0.5,
        num_return_sequences=1,
        repetition_penalty=4.5,
        max_length=1024,
    )
    return tokenizer.decode(hypothesis_ids[0], skip_special_tokens=True)
if __name__ == "__main__":
    # CLI entry point: run the BLEU/latency evaluation with parsed arguments.
    parser = get_parser()
    args = parser.parse_args()
    main(
        args.datasets_path,
        args.tokenizer_pretrained_path,
        args.model_pretrained_path,
        args.device,
        args.max_history_tokens,
        args.max_history_messages,
        args.repeats_count,
    ) | /rupersonaagent-0.1.0-py3-none-any.whl/knowledge_distillation/evaluation.py | 0.713232 | 0.248534 | evaluation.py | pypi
import argparse
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple
import jsonlines
import numpy as np
import pandas as pd
import torch
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from tqdm.auto import tqdm
from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
from transformers.models.t5.tokenization_t5 import T5Tokenizer
from utils.data_preparation import make_pairs
from utils.read_jsonl_data import read_jsonl_data
# Disable TensorFlow's oneDNN custom ops via env var (must be set before TF
# is imported); presumably to keep numerics/log output consistent — confirm.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
def get_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the knowledge-generation script.

    Numeric options carry type=int so CLI-supplied values match the int
    defaults (originally they arrived as `str` and broke token/slice math).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--datasets-path", default="data/Толока Персона Чат")
    parser.add_argument(
        "--tokenizer-pretrained-path", default="cointegrated/rut5-small-chitchat"
    )
    parser.add_argument(
        "--model-pretrained-path",
        default="experiments/exp2-t5-small-chitchat-finetuning/checkpoints/2000_steps",
    )
    parser.add_argument("--device", default="cuda:0")
    parser.add_argument("--max-history-tokens", type=int, default=512)
    parser.add_argument("--max-history-messages", type=int, default=4)
    parser.add_argument(
        "--save-path",
        default="data/t5-small-chitchat-finetuned-generation/dialogs.jsonl",
    )
    return parser
def main(
    datasets_path: List[str],
    tokenizer_pretrained_path: str,
    model_pretrained_path: str,
    device: str,
    max_history_tokens: int,
    max_history_messages: int,
    save_path: str,
):
    """Generate a reply for every unique dialog history, score with BLEU-4,
    and dump the synthesized dialogs to `save_path` as JSONL.
    """
    # NOTE(review): `datasets_path` is used as one directory path; the
    # `List[str]` annotation presumably should be `str`.
    datasets = [read_jsonl_data(path) for path in Path(datasets_path).iterdir()]
    data: List[Dict[str, List[Dict[str, Any]]]] = list()
    for dataset in datasets:
        data.extend(dataset)
    tokenizer, model = get_model(tokenizer_pretrained_path, model_pretrained_path)
    model = model.eval().to(device)
    df = make_pairs(data, tokenizer, max_history_tokens, max_history_messages)
    df = pd.DataFrame([("</s>".join(p[0]), p[1]) for p in df])
    df = df[~df.duplicated()]
    BLEUs = list()
    new_data = list()
    # Group rows sharing the same history; their replies become references.
    for df_tmp in tqdm(df.groupby(by=0)):
        _, df_tmp = df_tmp
        references = [
            tokenizer.tokenize(reference) for reference in df_tmp[1].to_list()
        ]
        for history in df_tmp[0]:
            answer = generate_answer(history, tokenizer, model)
            answer = tokenizer.tokenize(answer)
            bleu = sentence_bleu(
                references, answer, smoothing_function=SmoothingFunction().method1
            )
            BLEUs.append(bleu)
            new_data.append((history, tokenizer.convert_tokens_to_string(answer)))
    # NOTE(review): "BLUE-4" is presumably a typo for "BLEU-4".
    print("Generated BLUE-4:", np.mean(BLEUs))
    new_data = pd.DataFrame(new_data)
    dialogs = pairs2dialogs(new_data)
    with jsonlines.open(save_path, mode="w") as writer:
        for dialog in dialogs:
            writer.write(dialog)
def get_model(
    tokenizer_pretrained_path: str, model_pretrained_path: str
) -> Tuple[T5Tokenizer, T5ForConditionalGeneration]:
    """Return the (tokenizer, model) pair loaded from their checkpoints."""
    return (
        T5Tokenizer.from_pretrained(tokenizer_pretrained_path),
        T5ForConditionalGeneration.from_pretrained(model_pretrained_path),
    )
@torch.no_grad()
def generate_answer(
    history_text: str, tokenizer: T5Tokenizer, model: T5ForConditionalGeneration
) -> str:
    """Sample a single generated reply for `history_text`."""
    model_inputs = tokenizer(history_text, return_tensors="pt")
    model_inputs = {k: v.to(model.device) for k, v in model_inputs.items()}
    candidates = model.generate(
        **model_inputs,
        do_sample=True,
        top_p=0.5,
        num_return_sequences=1,
        repetition_penalty=4.5,
        max_length=1024,
    )
    return tokenizer.decode(candidates[0], skip_special_tokens=True)
def pairs2dialogs(pairs: pd.DataFrame) -> List[Dict[str, List[Dict[str, Any]]]]:
    """Reconstruct dialog records from (history, answer) DataFrame rows.

    History messages alternate speakers 0/1 starting from 0; the generated
    answer is attributed to the speaker following the last history message.
    """
    dialogs: List[Dict[str, List[Dict[str, Any]]]] = list()
    for history, answer in tqdm(pairs.values):
        turns: List[Dict[str, Any]] = []
        parts = history.split("</s>")
        for turn_idx, turn_text in enumerate(parts):
            turns.append({"person": turn_idx % 2, "text": turn_text, "gk": []})
        # The reply belongs to the next speaker after the final history turn.
        turns.append({"person": len(parts) % 2, "text": answer, "gk": []})
        dialogs.append({"persons": list(), "dialog": turns})
    return dialogs
if __name__ == "__main__":
    # CLI entry point: unpack args explicitly, then run generation + export.
    parser = get_parser()
    args = parser.parse_args()
    datasets_path = args.datasets_path
    tokenizer_pretrained_path = args.tokenizer_pretrained_path
    model_pretrained_path = args.model_pretrained_path
    device = args.device
    max_history_tokens = args.max_history_tokens
    max_history_messages = args.max_history_messages
    save_path = args.save_path
    main(
        datasets_path,
        tokenizer_pretrained_path,
        model_pretrained_path,
        device,
        max_history_tokens,
        max_history_messages,
        save_path,
    ) | /rupersonaagent-0.1.0-py3-none-any.whl/knowledge_distillation/generate_knowledge.py | 0.667364 | 0.252839 | generate_knowledge.py | pypi
```
from transformers import T5ForConditionalGeneration
# --- Baseline: HF Transformers on CPU and CUDA --------------------------------
t5_cpu = T5ForConditionalGeneration.from_pretrained("cointegrated/rut5-base-multitask", resume_download=True).eval()
t5_cuda = t5_cpu.cuda()
import torch
import torch.utils.benchmark as benchmark
import os
# Pin the intra-op thread count for a fair CPU comparison across backends.
num_threads = 16
os.environ["OMP_NUM_THREADS"] = str(num_threads)
os.environ["MKL_NUM_THREADS"] = str(num_threads)
torch.set_num_threads(num_threads)
# Random token ids as synthetic input (batch=1, seq=32).
input_cpu = torch.randint(high=30000, size=(1, 32), dtype=torch.int64)
input_cuda = torch.randint(high=30000, size=(1, 32), dtype=torch.int64).cuda()
# Warm-up generation calls.
t5_cpu.generate(input_cpu, do_sample=True, num_beams=4, max_new_tokens=20)
t5_cuda.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=20)
# --- ONNX Runtime (CPU and CUDA execution providers) --------------------------
from optimum.onnxruntime import ORTModelForSeq2SeqLM
import onnxruntime as ort
sess_options = ort.SessionOptions()
sess_options.intra_op_num_threads = num_threads
sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
t5_ort_cpu = ORTModelForSeq2SeqLM.from_pretrained("cointegrated/rut5-base-multitask",
                                                  export=True,
                                                  provider="CPUExecutionProvider",
                                                  session_options=sess_options)
t5_ort_cuda = ORTModelForSeq2SeqLM.from_pretrained("cointegrated/rut5-base-multitask",
                                                   from_transformers=True,
                                                   provider="CUDAExecutionProvider")
t5_ort_cpu.generate(input_cpu, do_sample=True, num_beams=4, max_new_tokens=20)
t5_ort_cuda.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=20)
# --- EET (Easy and Efficient Transformer) backend -----------------------------
from eet.transformers.modeling_t5 import EETT5ForConditionalGeneration
t5_eet = EETT5ForConditionalGeneration.from_pretrained("cointegrated/rut5-base-multitask", 1)
t5_eet.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=1)
# --- LightSeq backend (pre-converted HDF5 weights) ----------------------------
import lightseq.inference as lsi
t5_ls = lsi.T5("lightseq_t5_base.hdf5", 1)
t5_ls.infer(input_cpu.numpy())
# --- ONNX Runtime with TensorRT execution provider ----------------------------
from optimum.onnxruntime import ORTModelForSeq2SeqLM
t5_ort_trt = ORTModelForSeq2SeqLM.from_pretrained("cointegrated/rut5-base-multitask",
                                                  from_transformers=True,
                                                  provider="TensorrtExecutionProvider")
t5_ort_trt.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=20)
# --- ONNX Runtime with OpenVINO execution provider ----------------------------
from optimum.onnxruntime import ORTModelForSeq2SeqLM
import onnxruntime as ort
options = ort.SessionOptions()
options.intra_op_num_threads = num_threads
options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL # https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html#other-configuration-settings
t5_ort_openvino = ORTModelForSeq2SeqLM.from_pretrained("cointegrated/rut5-base-multitask",
                                                       export=True,
                                                       provider="OpenVINOExecutionProvider",
                                                       session_options=options,
                                                       provider_options={"num_of_threads": num_threads})
t5_ort_openvino.generate(input_cpu, do_sample=True, num_beams=4, max_new_tokens=20)
# --- Optimum Intel OpenVINO backend -------------------------------------------
from optimum.intel.openvino import OVModelForSeq2SeqLM
t5_openvino = OVModelForSeq2SeqLM.from_pretrained("cointegrated/rut5-base-multitask",
    export=True, use_cache=True, compile=True, ov_config={"INFERENCE_NUM_THREADS": num_threads})
# NOTE(review): this warm-up uses num_beams=2 while all others use 4 — confirm.
t5_openvino.generate(input_cpu, do_sample=True, num_beams=2, max_new_tokens=20)
# For CPU benchmark: time max_new_tokens=1 generation over several sequence
# lengths and report seconds/call for each CPU backend.
from tqdm.auto import tqdm
from timeit import default_timer
import os
num_runs = 100
results = {}
for seq_len in [8, 32, 64]:
    print(f"Sequence length: {seq_len}")
    input_cpu = torch.randint(high=30000, size=(1, seq_len), dtype=torch.int64)
    for model, description in zip([t5_cpu, t5_ort_cpu, t5_ort_openvino, t5_openvino],
                                  ["PyTorch (CPU)", "ORT (CPU)", "ORT (OpenVINO)", "Optimum OpenVINO"]):
        model.generate(input_cpu, do_sample=True, num_beams=4, max_new_tokens=1) # warmup
        start_time = default_timer()
        for i in range(num_runs):
            model.generate(input_cpu, do_sample=True, num_beams=4, max_new_tokens=1)
        print(f"{description}\t{(default_timer() - start_time) / num_runs}")
# For GPU benchmark: same timing loop on CUDA backends; LightSeq takes numpy
# input via infer() instead of generate().
from tqdm.auto import tqdm
from timeit import default_timer
import os
num_runs = 100
results = {}
for seq_len in [8, 32, 64]:
    print(f"Sequence length: {seq_len}")
    input_cpu = torch.randint(high=30000, size=(1, seq_len), dtype=torch.int64)
    input_cuda = torch.randint(high=30000, size=(1, seq_len), dtype=torch.int64, device="cuda")
    for model, description in zip([t5_cuda, t5_eet, t5_ls], #t5_ort_cuda],
                                  ["PyTorch (CUDA)", "EET", "LightSeq"]): #"ORT (CUDA)"]):
        if description == "LightSeq":
            model.infer(input_cpu.numpy())
        else:
            model.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=1) # warmup
        # Synchronize so timing covers all queued GPU work.
        torch.cuda.synchronize()
        start_time = default_timer()
        for i in range(num_runs):
            if description == "LightSeq":
                model.infer(input_cpu.numpy())
            else:
                model.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=1)
        torch.cuda.synchronize()
        print(f"{description}\t{(default_timer() - start_time) / num_runs}")
        # NOTE(review): this trailing generate runs once more after timing —
        # presumably leftover scratch; confirm intent.
        model.generate(input_cuda, do_sample=True, num_beams=4, max_new_tokens=1)
```
| /rupersonaagent-0.1.0-py3-none-any.whl/inference_optimization/benchmark.ipynb | 0.517083 | 0.34091 | benchmark.ipynb | pypi |
```
import torch
import pytorch_lightning as pl
import transformers
import torchmetrics
import pandas as pd
import os
import json
from model import T5MultiTask
from data_module import TolokaDataModule
# proxy: route HF hub downloads through the corporate proxy.
os.environ["http_proxy"] = "http://proxy.ad.speechpro.com:3128"
os.environ["https_proxy"] = "http://proxy.ad.speechpro.com:3128"
os.environ["ftp_proxy"] = "http://proxy.ad.speechpro.com:3128"
train_batch_size = 64
t5 = transformers.T5ForConditionalGeneration.from_pretrained("cointegrated/rut5-base-multitask", resume_download=True)
tokenizer = transformers.AutoTokenizer.from_pretrained("cointegrated/rut5-base-multitask", truncation_side='left', padding_side='right')
# Register the project's special tokens with the tokenizer.
with open('/home/stc/persona/data/preprocessing/spec_tokens.json') as spec_tokens_config:
    spec_tokens = json.load(spec_tokens_config)
tokenizer.add_special_tokens(
    {"additional_special_tokens": [spec_tokens[k] for k in spec_tokens]}
)
# NOTE(review): train_batch_size above is 64 but the datamodule is built with
# train_batch_size=128 — presumably one of these is stale; confirm.
datamodule=TolokaDataModule(
    data_dir='/home/stc/persona/data',
    datasets=['current_gk', 'next_answer'], #'next_answer', 'current_gk', 'next_gk'
    tokenizer=tokenizer,
    spec_tokens=spec_tokens,
    train_batch_size=128,
    val_batch_size=256,
    test_batch_size=256,
)
model = T5MultiTask(
    model=t5,
    datamodule=datamodule,
    lr=5e-5,
    num_warmup_steps=1000,
    pooling="mean",
    distance="cosine",
    scale=20,
    train_batch_size=train_batch_size,
    val_batch_size=256,
    test_batch_size=256,
)
# logger
# SECURITY(review): Comet API key is hard-coded in source — rotate the key and
# load it from an environment variable instead of committing it.
logger = pl.loggers.comet.CometLogger(
    api_key='sEJsZrYjwc0gxxUAUGQNBwTsb',
    save_dir='/home/stc/persona/logs',
    project_name='chaT5',
    experiment_name='current_gk+next_answer base',
    log_code=True,
)
# trainer
trainer = pl.Trainer(
    max_epochs=15,
    accelerator="gpu",
    devices=1,
    gradient_clip_val=1,
    logger=logger,
    num_sanity_val_steps=10,
)
trainer.fit(model, datamodule=datamodule)
# Separate datamodule for qualitative inspection of the current_gk task.
datamodule_test=TolokaDataModule(
    data_dir='/home/stc/persona/data',
    datasets=['current_gk'], #'next_answer', 'current_gk', 'next_gk'
    tokenizer=tokenizer,
    spec_tokens=spec_tokens,
    train_batch_size=train_batch_size,
    val_batch_size=256,
    test_batch_size=256,
)
val_set = datamodule_test.val_dataloader()
model.to('cuda')
# Print model generations next to the target outputs for current_gk.
for val_batch in val_set:
    input_ids = val_batch['current_gk']['query']['input_ids']
    true_ids = val_batch['current_gk']['candidate']['input_ids']
    out = model.transformer.generate(input_ids.to('cuda'), do_sample=True,
                                     num_beams=4,
                                     )
    out_text = model.datamodule.tokenizer.batch_decode(out, skip_special_tokens=True)
    inp_text = model.datamodule.tokenizer.batch_decode(input_ids, skip_special_tokens=True)
    true_text = model.datamodule.tokenizer.batch_decode(true_ids, skip_special_tokens=True)
    for inp, out, true in zip(inp_text, out_text, true_text):
        print('input:', inp)
        print('model output:', out)
        print('target output:', true)
        print()
model.to('cuda')
# Same inspection for next_answer (special tokens kept visible).
# NOTE(review): val_set comes from a datamodule built with only 'current_gk';
# indexing 'next_answer' here presumably raises KeyError — confirm.
for val_batch in val_set:
    input_ids = val_batch['next_answer']['query']['input_ids']
    true_ids = val_batch['next_answer']['candidate']['input_ids']
    out = model.transformer.generate(input_ids.to('cuda'), do_sample=True,
                                     num_beams=4,
                                     )
    inp_text = model.datamodule.tokenizer.batch_decode(input_ids, skip_special_tokens=False)
    out_text = model.datamodule.tokenizer.batch_decode(out, skip_special_tokens=False)
    true_text = model.datamodule.tokenizer.batch_decode(true_ids, skip_special_tokens=False)
    for inp, out, true in zip(inp_text, out_text, true_text):
        print('input:', inp.replace("<pad>", "").replace("[Model]", "\n[Model]").replace("[User]", "\n[User]"))
        print('model output:', out.replace("<pad>", ""))
        print('target output:', true.replace("<pad>", ""))
        print()
```
| /rupersonaagent-0.1.0-py3-none-any.whl/generative_model/main.ipynb | 0.473901 | 0.274364 | main.ipynb | pypi |
import pytorch_lightning as pl
import torch
import torchmetrics
import transformers
from typing import Literal
def parse_recursive_dict(inp_dict, tokens=None):
    """Depth-first flatten: collect every non-dict leaf value of `inp_dict`.

    `tokens` is the accumulator list; a falsy value (None, []) starts a
    fresh one.
    """
    tokens = tokens or []
    for value in inp_dict.values():
        if isinstance(value, dict):
            tokens = parse_recursive_dict(value, tokens=tokens)
        else:
            tokens.append(value)
    return tokens
class T5MultiTask(pl.LightningModule):
def __init__(
self,
model,
datamodule,
lr: float = 5e-5,
num_warmup_steps: int = 100,
pooling: Literal["mean", "cls"] = "mean",
distance: Literal["cosine", "dot_product"] = "cosine",
scale: int = 20,
train_batch_size: int = 64,
val_batch_size: int = 64,
test_batch_size: int = 64,
):
super().__init__()
self.save_hyperparameters()
self.transformer = model
self.datamodule = datamodule
self.transformer.resize_token_embeddings(len(self.datamodule.tokenizer))
# loss
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
# metrics
# next answer
next_answer_metrics = torchmetrics.MetricCollection(
{
"next_answer_BLEU1": torchmetrics.BLEUScore(n_gram=1),
"next_answer_BLEU2": torchmetrics.BLEUScore(n_gram=2),
}
)
self.train_next_answer_metrics = next_answer_metrics.clone(prefix="train_")
self.val_next_answer_metrics = next_answer_metrics.clone(prefix="val_")
# current gk
current_gk_metrics = torchmetrics.MetricCollection(
{
"current_gk_BLEU1": torchmetrics.BLEUScore(n_gram=1),
"current_gk_BLEU2": torchmetrics.BLEUScore(n_gram=2),
}
)
self.train_current_gk_metrics = current_gk_metrics.clone(prefix="train_")
self.val_current_gk_metrics = current_gk_metrics.clone(prefix="val_")
# next gk
# TODO: add MetricCollection for next gk: r1 r5 mrr
def get_embedding(self, inputs):
model_output = self.transformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
labels=inputs["input_ids"],
output_hidden_states=True,
)
if self.hparams.pooling == "mean":
embeddings = self.mean_pooling(model_output, inputs["attention_mask"])
elif self.hparams.pooling == "cls":
embeddings = self.cls_pooling(model_output)
if self.hparams.distance == "cosine":
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# add embeddings = None or else: raise if embeddings not defined
return embeddings
def training_step(self, batch: dict, batch_idx):
task = batch["task"]
batch = batch[task]
if task == "next_gk" and False:
# Compute embeddings
query = self.get_embedding(batch["query"])
candidate = self.get_embedding(batch["candidate"])
labels = torch.argmax(batch["labels"], dim=-1)
# Compute similarity scores
scores = torch.mm(query, candidate.transpose(0, 1)) * self.hparams.scale
# Symmetric loss as in CLIP
loss = (self.cross_entropy_loss(scores, labels) + self.cross_entropy_loss(
scores.transpose(0, 1),
labels)) / 2
# metrics
preds = scores.view(-1).cpu()
targets = batch["labels"].reshape(preds.shape)
indexes = (
torch.arange(scores.shape[0])
.unsqueeze(1)
.expand_as(scores)
.reshape(preds.shape)
)
# Log
self.log("train_loss_next_gk", loss.item(), sync_dist=True)
metrics = self.train_next_gk_metrics[task](preds, targets, indexes)
self.log_dict(metrics, sync_dist=True)
# current gk
elif task == "current_gk":
model_output = self.transformer(
input_ids=batch["query"]["input_ids"],
attention_mask=batch["query"]["attention_mask"],
labels=batch["candidate"]["input_ids"],
)
# loss
loss = model_output.loss
# metrics
model_output = self.transformer.generate(
input_ids=batch["query"]["input_ids"]
)
target_candidate = self.datamodule.tokenizer.batch_decode(
batch["candidate"]["input_ids"], skip_special_tokens=True
)
target_candidate = [[i] for i in target_candidate]
output_candidate = self.datamodule.tokenizer.batch_decode(
model_output, skip_special_tokens=True
)
metrics = self.train_current_gk_metrics(output_candidate, target_candidate)
self.log_dict(metrics, sync_dist=True)
# next answer
elif task == "next_answer":
model_output = self.transformer(
input_ids=batch["query"]["input_ids"],
attention_mask=batch["query"]["attention_mask"],
labels=batch["candidate"]["input_ids"],
)
# loss
loss = model_output.loss
# metrics
model_output = self.transformer.generate(
input_ids=batch["query"]["input_ids"],
)
target_candidate = self.datamodule.tokenizer.batch_decode(
batch["candidate"]["input_ids"], skip_special_tokens=True
)
target_candidate = [[i] for i in target_candidate]
output_candidate = self.datamodule.tokenizer.batch_decode(
model_output, skip_special_tokens=True
)
metrics = self.train_next_answer_metrics(output_candidate, target_candidate)
self.log_dict(metrics, sync_dist=True)
return loss
    def validation_step(self, batch: dict, batch_idx):
        """Run one validation step for the task selected by ``batch["task"]``.

        Mirrors ``training_step``: the generation tasks ("current_gk",
        "next_answer") compute a teacher-forced LM loss plus text-generation
        metrics; the retrieval branch ("next_gk") is currently disabled (see
        note below). Metrics are emitted via ``self.log``/``self.log_dict``;
        nothing is returned.
        """
        task = batch["task"]
        # The batch is keyed by task name; keep only the relevant sub-batch.
        batch = batch[task]
        # NOTE(review): "and False" makes this whole branch dead code -- it
        # looks like a deliberate temporary disable; confirm before removing
        # or re-enabling.
        if task == "next_gk" and False:
            # Compute embeddings
            query = self.get_embedding(batch["query"])
            candidate = self.get_embedding(batch["candidate"])
            # labels = torch.argmax(batch["labels"], dim=-1)
            # Compute similarity scores
            scores = torch.mm(query, candidate.transpose(0, 1)) * self.hparams.scale
            # Symmetric loss as in CLIP
            # loss = (
            #     self.cross_entropy_loss(scores, labels)
            #     + self.cross_entropy_loss(scores.transpose(0, 1), labels)
            # ) / 2
            loss = self.triplet_loss(batch["labels"], scores)
            # metrics
            preds = scores.view(-1).cpu()
            targets = batch["labels"].reshape(preds.shape)
            # Row index of every score element, flattened to align with
            # ``preds`` (shape expected by retrieval-style metrics).
            indexes = (
                torch.arange(scores.shape[0])
                .unsqueeze(1)
                .expand_as(scores)
                .reshape(preds.shape)
            )
            # Log
            self.log("val_next_gk_loss", loss.item(), sync_dist=True)
            metrics = self.val_next_gk_metrics[task](preds, targets, indexes)
            self.log_dict(metrics, sync_dist=True)
        # current gk
        elif task == "current_gk":
            # Teacher-forced pass gives the validation loss.
            model_output = self.transformer(
                input_ids=batch["query"]["input_ids"],
                attention_mask=batch["query"]["attention_mask"],
                labels=batch["candidate"]["input_ids"],
            )
            # loss
            # NOTE(review): this loss is computed but never logged or returned.
            loss = model_output.loss
            # metrics
            # Free-running generation (no attention mask) for text metrics.
            model_output = self.transformer.generate(
                input_ids=batch["query"]["input_ids"],
            )
            target_candidate = self.datamodule.tokenizer.batch_decode(
                batch["candidate"]["input_ids"], skip_special_tokens=True
            )
            # Generation metrics expect one list of references per example.
            target_candidate = [[i] for i in target_candidate]
            output_candidate = self.datamodule.tokenizer.batch_decode(
                model_output, skip_special_tokens=True
            )
            metrics = self.val_current_gk_metrics(output_candidate, target_candidate)
            self.log_dict(metrics, sync_dist=True)
        # next answer
        elif task == "next_answer":
            # Same structure as "current_gk" with task-specific metrics.
            model_output = self.transformer(
                input_ids=batch["query"]["input_ids"],
                attention_mask=batch["query"]["attention_mask"],
                labels=batch["candidate"]["input_ids"],
            )
            # loss
            # NOTE(review): this loss is computed but never logged or returned.
            loss = model_output.loss
            # metrics
            model_output = self.transformer.generate(
                input_ids=batch["query"]["input_ids"],
            )
            target_candidate = self.datamodule.tokenizer.batch_decode(
                batch["candidate"]["input_ids"], skip_special_tokens=True
            )
            target_candidate = [[i] for i in target_candidate]
            output_candidate = self.datamodule.tokenizer.batch_decode(
                model_output, skip_special_tokens=True
            )
            metrics = self.val_next_answer_metrics(output_candidate, target_candidate)
            self.log_dict(metrics, sync_dist=True)
def configure_optimizers(self):
optimizer = torch.optim.AdamW(self.parameters(), lr=self.hparams.lr)
scheduler = transformers.get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.num_warmup_steps,
num_training_steps=self.trainer.estimated_stepping_batches,
)
return [optimizer], [
{"scheduler": scheduler, "name": "cosine_scheduler", "interval": "step"}
]
def mean_pooling(self, model_output: torch.Tensor, attention_mask: torch.Tensor):
# First element of model_output contains all token embeddings
token_embeddings = model_output.encoder_last_hidden_state
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
)
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
input_mask_expanded.sum(1), min=1e-9
)
def cls_pooling(self, model_output: torch.Tensor):
return model_output.encoder_last_hidden_state[:, 0]
def triplet_loss(self, target, pred, margin=1.0):
pos_mask = target
neg_mask = torch.abs(target - 1)
n_pos = torch.sum(pos_mask, dim=-1)
n_neg = torch.sum(neg_mask, dim=-1)
sims_pos = pred * pos_mask
sims_neg = pred * pos_mask
mean_sim_pos = sims_pos / n_pos
mean_sim_neg = sims_neg / n_neg
return torch.mean(mean_sim_neg - mean_sim_pos + margin) * 10 | /rupersonaagent-0.1.0-py3-none-any.whl/generative_model/model.py | 0.839701 | 0.391173 | model.py | pypi |
import pandas as pd
from speech_extraction.SpeechCharacteristic import SpeechCharacteristic, characteristics
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from statistics import mean
import string
import operator
import functools
from collections import Counter
import scipy.stats as stats
nltk.download('punkt')
def read_dataset(dataset_name: str, path: str, column_name: str, delimiter: str):
    """Load one column of a delimited file; return (column, dataset_name)."""
    frame = pd.read_csv(path, delimiter=delimiter)
    column = frame[column_name]
    return column, dataset_name
def get_sentence_and_words(dataset):
    """Compute surface statistics over a pandas Series of texts.

    :param dataset: Series whose values are raw text documents.
    :return: tuple of (mean sentences per text, mean words per sentence,
        mean letters per word, mean punctuation marks per sentence,
        total sentence count).
    """
    sentences = []
    words = []
    letters_in_words = []
    punctuations = []
    letters = string.ascii_letters
    punctuation = string.punctuation
    # FIX: Series.iteritems() was removed in pandas 2.0; items() is the
    # supported spelling and exists in older pandas as well.
    for key, value in dataset.items():
        sentences.append(len(sent_tokenize(value)))
        for sent in sent_tokenize(value):
            words_count = len(word_tokenize(sent))
            words.append(words_count)
            # Count characters of the sentence that are ASCII letters.
            letter_count = len(
                list(filter(functools.partial(operator.contains, letters), sent))
            )
            punctuation_count = len(
                list(filter(functools.partial(operator.contains, punctuation), sent))
            )
            letters_in_words.append(letter_count / words_count)
            punctuations.append(punctuation_count)
    return (
        mean(sentences),
        mean(words),
        mean(letters_in_words),
        mean(punctuations),
        sum(sentences),
    )
def get_pos_tags(dataset, size):
    """Return POS-tag frequencies over the whole Series, normalised by ``size``.

    :param dataset: Series of texts (joined into one document before tagging).
    :param size: normalisation denominator (total sentence count upstream).
    :return: dict mapping POS tag -> count / size.
    """
    # FIX: Series.iteritems() was removed in pandas 2.0; items() is the
    # supported spelling and exists in older pandas as well.
    text = ' '.join([v for k, v in dataset.items()])
    tokens = nltk.word_tokenize(text.lower())
    new_text = nltk.Text(tokens)
    tags = nltk.pos_tag(new_text)
    counts = Counter(tag for word, tag in tags)
    return dict((word, float(count) / size) for word, count in counts.items())
def get_speech_characteristic(dataset, dataset_name: str):
    """Build a SpeechCharacteristic profile for a Series of texts.

    Fills surface statistics (sentence/word/letter/punctuation averages)
    plus per-tag POS frequencies, restricted to the tags listed in the
    module-level ``characteristics``.
    """
    speech = SpeechCharacteristic(dataset_name=dataset_name)
    sentences, words, letters, punctuation, size = get_sentence_and_words(dataset)
    speech.sentences_in_speech = sentences
    speech.words_in_phrase = words
    speech.letter_in_words = letters
    speech.punctuation = punctuation
    # Normalised POS-tag counts; only known characteristic tags are copied.
    # NOTE(review): a truthiness check -- a tag with frequency exactly 0.0
    # (or missing) is skipped, leaving the dataclass default in place.
    pos_tags = get_pos_tags(dataset, size)
    for ch in characteristics:
        if pos_tags.get(ch):
            speech.__dict__[ch] = pos_tags[ch]
    return speech
def get_info_from_dataset(
    dataset_name: str, path: str, column_name: str, delimiter: str
):
    """Read one dataset column and build its SpeechCharacteristic profile."""
    column, name = read_dataset(dataset_name, path, column_name, delimiter)
    return get_speech_characteristic(column, name)
def get_info_from_sentence(sentence: str, column_name: str, phrase_name: str):
    """Build a SpeechCharacteristic profile for a single sentence."""
    series = pd.Series({column_name: sentence})
    return get_speech_characteristic(series, phrase_name)
def compare_characteristics(
    first_characteristic: SpeechCharacteristic,
    second_characteristic: SpeechCharacteristic,
):
    """Mann-Whitney U p-value between two speech profiles.

    The first attribute (dataset_name) is skipped; the remaining numeric
    feature values are compared as two independent samples.
    """
    first_values = list(first_characteristic.__dict__.values())[1:]
    second_values = list(second_characteristic.__dict__.values())[1:]
    _, p_value = stats.mannwhitneyu(first_values, second_values, use_continuity=False)
    return p_value
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Basic script which allows local human keyboard input to talk to a trained model.
Examples
--------
.. code-block:: shell
python projects/convai2/interactive.py -mf models:convai2/kvmemnn/model
When prompted, chat with the both, you will both be assigned personalities!
Use "[DONE]" to indicate you are done with that chat partner, and want a new one.
"""
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.agents.local_human.local_human import LocalHumanAgent
import random
pretrained_model_file = 'models/reinforced/cosplay.model'
def setup_args(parser=None):
    """Build the ParlAI argument parser for interactive persona chat.

    :param parser: existing ParlaiParser to extend; created when None.
    :return: the configured parser (model file defaults to the
        pre-trained reinforced model defined at module level).
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Interactive chat with a model')
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument(
        '--display-prettify',
        type='bool',
        default=False,
        help='Set to use a prettytable when displaying '
        'examples with text candidates',
    )
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    # Default to the pre-trained model path declared at module scope.
    parser.set_defaults(model_file=pretrained_model_file)
    LocalHumanAgent.add_cmdline_args(parser)
    return parser
def interactive(opt, print_parser=None):
    """Run an interactive persona-chat loop between a human and the model.

    A ConvAI2 teacher is used only to sample persona lines: the partner's
    persona is shown to the human, the bot's persona is prepended to the
    first human utterance of each chat.

    :param opt: ParlAI options dict (or, deprecated, a ParlaiParser).
    :param print_parser: parser used to echo the final arguments, if given.
    """
    if print_parser is not None:
        if print_parser is True and isinstance(opt, ParlaiParser):
            print_parser = opt
        elif print_parser is False:
            print_parser = None
    if isinstance(opt, ParlaiParser):
        print('[ Deprecated Warning: interactive should be passed opt not Parser ]')
        opt = opt.parse_args()
    opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
    # Create model and assign it to the specified task
    agent = create_agent(opt, requireModelExists=True)
    world = create_task(opt, agent)
    if print_parser:
        # Show arguments after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()
    # Create ConvAI2 data so we can assign personas.
    convai2_opt = opt.copy()
    convai2_opt['task'] = 'convai2:both'
    convai2_agent = RepeatLabelAgent(convai2_opt)
    convai2_world = create_task(convai2_opt, convai2_agent)
    def get_new_personas():
        # Find a new episode
        while True:
            convai2_world.parley()
            msg = convai2_world.get_acts()[0]
            if msg['episode_done']:
                convai2_world.parley()
                msg = convai2_world.get_acts()[0]
                break
        # Split the teacher text into persona lines: print the partner's
        # persona for the human; accumulate "your persona:" lines for the bot.
        txt = msg.get('text', '').split('\n')
        bot_persona = ""
        for t in txt:
            if t.startswith("partner's persona:"):
                print(t.replace("partner's ", 'your '))
            if t.startswith('your persona:'):
                bot_persona += t + '\n'
        print("Enter [DONE] if you want a new partner at any time.")
        return bot_persona
    # Now run interactive mode, chatting with personas!
    cnt = 0
    while True:
        if cnt == 0:
            bot_persona = get_new_personas()
            print('BOT PERSONA:')
            print(bot_persona.split('\n'))
        # Run the parts of world.parley() in turn,
        # but insert persona into user message.
        acts = world.acts
        agents = world.agents
        acts[0] = agents[0].act()
        # add the persona on to the first message
        if agents[0].episode_done():
            print("CHAT DONE ")
            print("\n... preparing new chat... \n")
            cnt = 0
            agents[0].episodeDone = False
            continue
        if cnt == 0:
            # Prepend the bot persona to the first human utterance only.
            acts[0]['text'] = bot_persona + acts[0].get('text', 'hi')
        agents[1].observe(acts[0])
        acts[1] = agents[1].act()
        agents[0].observe(acts[1])
        world.update_counters()
        cnt = cnt + 1
        if opt.get('display_examples'):
            print("---")
            print(world.display())
if __name__ == '__main__':
    # Fixed seed so persona sampling is reproducible across runs.
    random.seed(42)
    parser = setup_args()
    # Force single-example batches and the Cosplay transformer agent;
    # initialise the transmitter from the pre-trained model file.
    parser.set_params(
        dict_lower=True,
        batchsize=1,
        rank_candidates=False,
        model='agents.cosplay.cosplay:TransformerAgent',
        init_model_transmitter=pretrained_model_file
    )
    interactive(parser.parse_args(print_args=False), print_parser=parser)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Train model for ppl metric with pre-selected parameters.
These parameters have some variance in their final perplexity, but they were
used to achieve the pre-trained model.
"""
import random
import torch
from agents.cosplay.cosplay import ARCH_CHOICE
from parlai.scripts.train_model import setup_args as setup_dict_args, TrainLoop
# Parameters
NAME = "cosplay_base"
IS_ORIGINAL = True
GEN, GATE, CLS = 1., 1., 1.
PERSONA_POOL_SIZE = 250
PERSONA_LOWER_BOUND = 0
CONTEXT_LOWER_BOUND = 0
USE_ATTENTION = True
BEAM_SIZE = 2
MODEL_DIR = 'models/supervised'
DATA_DIR = 'data'
def setup_task():
    """Pick the ConvAI2 teacher class path based on the IS_ORIGINAL flag."""
    if IS_ORIGINAL:
        return 'tasks.convai2cosplay.agents:SelfOriginalTeacher'
    return 'tasks.convai2cosplay.agents:SelfRevisedTeacher'
def setup_seed(seed=1706123):
    """Seed the stdlib RNG and torch (CPU and CUDA) for reproducible runs."""
    random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def gpt_setting():
    """Hyper-parameters for the GPT arch: (batchsize, lr, optimizer, gradient_clip)."""
    batchsize, lr = 10, 1e-4
    return batchsize, lr, 'gpt_custom', 1.0
def lstm_setting():
    """Hyper-parameters for the LSTM arch: (batchsize, lr, optimizer, gradient_clip)."""
    batchsize, lr = 64, 3
    return batchsize, lr, 'sgd', 0.1
def setup_args():
    """Build the full training configuration for supervised Cosplay training.

    Architecture-specific batch size / lr / optimizer / clipping come from
    ``gpt_setting`` or ``lstm_setting`` depending on ARCH_CHOICE; everything
    else is fixed here via ``parser.set_defaults``.

    :return: the configured ParlAI parser
    """
    parser = setup_dict_args()
    # Local hyper-parameters collected before being set as parser defaults.
    exp_name = NAME
    n_epoches = 100
    beam_size = BEAM_SIZE
    encode_layers = 2
    decode_layers = 2
    embedding_size = 256
    turn_emed_size = 50
    encoder_turn_use = False
    encoder_dis_use = False
    encoder_hidden_size = 1024
    decoder_hidden_size = 1024
    encode_max_seq_len = 256
    decode_max_seq_len = 32
    smoothing = 0.05
    dropout = 0.1
    embedding_type = 'glove'
    momentum = 0.9
    persona_append_strategy = 'concat'
    history_append_strategy = -1
    select_persona = False
    shuffle_persona = True
    share_decoder_input_output_embed = False
    num_train_epochs = 4
    if ARCH_CHOICE == 'gpt':
        batchsize, lr, optimizer, gradient_clip = gpt_setting()
    else:
        batchsize, lr, optimizer, gradient_clip = lstm_setting()
    task_name = setup_task()
    parser.set_defaults(
        task=task_name,
        rank_candidates=False,
        # task='tasks.convai2cosplay.agents:SelfRevisedTeacher:no_cands',
        model='agents.cosplay.cosplay:TransformerAgent',
        datapath=DATA_DIR,
        # ===================== loss weights / persona pool =====================
        gen_weight=GEN,
        gate_weight=GATE,
        cls_weight=CLS,
        persona_pool_size=PERSONA_POOL_SIZE,
        persona_lower_bound=PERSONA_LOWER_BOUND,
        context_lower_bound=CONTEXT_LOWER_BOUND,
        use_attention=USE_ATTENTION,
        # ======================
        model_file='{}/{}.model'.format(MODEL_DIR, exp_name),
        dict_tokenizer='split',
        datatype='train',
        gpt_lr=6.25e-5,
        n_epoches=n_epoches,
        num_epochs=num_train_epochs,
        batchsize=batchsize,
        beam_size=beam_size,
        encoder_layers=encode_layers,
        decoder_layers=decode_layers,
        encoder_embed_dim=embedding_size,
        encoder_turn_dim=turn_emed_size,
        encoder_turn_use=encoder_turn_use,
        encoder_dis_use=encoder_dis_use,
        decoder_embed_dim=embedding_size,
        encode_max_seq_len=encode_max_seq_len,
        decode_max_seq_len=decode_max_seq_len,
        select_persona=select_persona,
        shuffle_persona=shuffle_persona,
        persona_append_strategy=persona_append_strategy,
        history_append_strategy=history_append_strategy,
        encoder_bidirectional=False,
        encoder_hidden_size=encoder_hidden_size,
        decoder_hidden_size=decoder_hidden_size,
        smoothing=smoothing,
        lr=lr,
        dropout=dropout,
        encoder_dropout_in=dropout,
        encoder_dropout_out=0,
        decoder_dropout_in=dropout,
        decoder_dropout_out=0,
        share_decoder_input_output_embed=share_decoder_input_output_embed,
        gradient_clip=gradient_clip,
        lookuptable='enc_dec',
        optimizer=optimizer,
        embedding_type=embedding_type,
        momentum=momentum,
        # rough enough
        validation_max_exs=-1,
        validation_every_n_secs=3600,
        validation_metric='f1',
        # NOTE(review): 'min' with an f1 validation metric looks inverted --
        # f1 is usually maximised; confirm this is intended.
        validation_metric_mode='min',
        validation_patience=5,
        log_every_n_secs=30,
        gpu=0,
        # logging configuration
        exp=exp_name,
        tensorboard_log=True,
        tensorboard_tag='exp',
        train_report_metrics='ppl,f1,hits@1',
        tensorboard_metrics='ppl,f1,hits@1',
        visualization=False
    )
    return parser
if __name__ == '__main__':
    opt = setup_args()
    # Seed before training so runs are reproducible.
    setup_seed()
    TrainLoop(opt).train()
import os
import copy
from parlai.core.teachers import FbDialogTeacher
from parlai.tasks.convai2.build import build
def _path(opt, persona, use_cands=None, self_play=False):
    """Resolve the on-disk ConvAI2 data file for a persona setting.

    :param opt: ParlAI options; 'datatype' and 'datapath' are read.
    :param persona: string, e.g. 'self', 'other', 'both', 'all',
        'self_revised', 'other_revised', 'both_revised', 'all_revised'
    :param use_cands: when falsy, select the '_no_cands' file variant.
    :param self_play: when True, select the '_selfplay' file
        (``use_cands`` is ignored in that case).
    :return: path to the dialogue text file.
    """
    # Download/build the data if it doesn't exist yet.
    build(opt)
    datatype = opt['datatype'].split(':')[0]
    if datatype == 'test':
        print("WARNING: Test set not included. Setting datatype to valid.")
        datatype = 'valid'
    prefix = datatype + '_' + persona
    if self_play:
        filename = prefix + '_selfplay' + '.txt'
    else:
        suffix = '' if use_cands else '_no_cands'
        filename = prefix + suffix + '.txt'
    return os.path.join(opt['datapath'], 'ConvAI2', filename)
class OriginalPersonaTeacher(FbDialogTeacher):
    """Self-play teacher over the original (unrevised) self-persona data."""
    def __init__(self, opt, shared=None):
        """
        :param opt: ParlAI options; must contain 'personapath'.
        :param shared: optional shared state from a clone of this teacher.
        """
        # NOTE(review): 'personapath' is asserted but never read here --
        # presumably consumed elsewhere; confirm it is still required.
        assert 'personapath' in opt, 'Please specify the path for the persona-only file'
        opt = copy.deepcopy(opt)
        opt['datafile'] = _path(opt, persona='self_original', self_play=True)
        super().__init__(opt, shared)
class OriginalTeacher(FbDialogTeacher):
    """Dialogue teacher over the original (unrevised) self-persona data."""
    def __init__(self, opt, shared=None):
        """
        :param opt: ParlAI options; must contain 'personapath'.
        :param shared: optional shared state from a clone of this teacher.
        """
        # NOTE(review): 'personapath' is asserted but never read here --
        # presumably consumed elsewhere; confirm it is still required.
        assert 'personapath' in opt, 'Please specify the path for the persona-only file'
        opt = copy.deepcopy(opt)
        opt['datafile'] = _path(opt, persona='self_original', self_play=False)
        super().__init__(opt, shared)
class RevisedPersonaTeacher(FbDialogTeacher):
    """Self-play teacher over the revised self-persona data."""
    def __init__(self, opt, shared=None):
        """
        :param opt: ParlAI options; must contain 'personapath'.
        :param shared: optional shared state from a clone of this teacher.
        """
        # NOTE(review): 'personapath' is asserted but never read here --
        # presumably consumed elsewhere; confirm it is still required.
        assert 'personapath' in opt, 'Please specify the path for the persona-only file'
        opt = copy.deepcopy(opt)
        opt['datafile'] = _path(opt, persona='self_revised', self_play=True)
        super().__init__(opt, shared)
class RevisedTeacher(FbDialogTeacher):
    """Dialogue teacher over the revised self-persona data."""
    def __init__(self, opt, shared=None):
        """
        :param opt: ParlAI options; must contain 'personapath'.
        :param shared: optional shared state from a clone of this teacher.
        """
        # NOTE(review): 'personapath' is asserted but never read here --
        # presumably consumed elsewhere; confirm it is still required.
        assert 'personapath' in opt, 'Please specify the path for the persona-only file'
        opt = copy.deepcopy(opt)
        opt['datafile'] = _path(opt, persona='self_revised', self_play=False)
        super().__init__(opt, shared)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDialogTeacher
from parlai.core.utils import warn_once
from .build_data import build
import copy
import os
'''All teachers have a version with and without label candidates. Each teacher
defaults to using a dataset with label candidates. To use a dataset without
label candidates, specify this using the task flag:
--task convai2:{TEACHER_NAME}:no_cands
where TEACHER_NAME is None, SelfOriginal (Self), or SelfRevised.
'''
def _path(opt, persona, use_cands):
    """Resolve the ConvAI2 data file for the given persona/candidate setting."""
    # Build the data if it doesn't exist.
    build(opt)
    datatype = opt['datatype'].split(':')[0]
    if datatype == 'test':
        warn_once("WARNING: Test set not included. Setting datatype to valid.")
        datatype = 'valid'
    suffix = '' if use_cands else '_no_cands'
    filename = datatype + '_' + persona + suffix + '.txt'
    return os.path.join(opt['datapath'], 'ConvAI2', filename)
class BothOriginalTeacher(FbDialogTeacher):
    """Teacher over the 'both_original' persona files (candidates by default)."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # A task of the form "convai2:Teacher:no_cands" disables label
        # candidates; any parse failure falls back to using candidates.
        try:
            use_cands = opt['task'].split(":")[2] != 'no_cands'
        except Exception:
            use_cands = True
        opt['datafile'] = _path(opt, 'both_original', use_cands)
        super().__init__(opt, shared)
class BothTeacher(BothOriginalTeacher):
    # Alias kept so the short task name 'both' keeps working.
    pass
class BothRevisedTeacher(FbDialogTeacher):
    """Teacher over the 'both_revised' persona files (candidates by default)."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # A task of the form "convai2:Teacher:no_cands" disables label
        # candidates; any parse failure falls back to using candidates.
        try:
            use_cands = opt['task'].split(":")[2] != 'no_cands'
        except Exception:
            use_cands = True
        opt['datafile'] = _path(opt, 'both_revised', use_cands)
        super().__init__(opt, shared)
class NoneTeacher(FbDialogTeacher):
    """Teacher over the 'none_original' files (no persona lines)."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # A task of the form "convai2:Teacher:no_cands" disables label
        # candidates; any parse failure falls back to using candidates.
        try:
            use_cands = opt['task'].split(":")[2] != 'no_cands'
        except Exception:
            use_cands = True
        opt['datafile'] = _path(opt, 'none_original', use_cands)
        super().__init__(opt, shared)
class SelfOriginalTeacher(FbDialogTeacher):
    """Teacher over the 'self_original' persona files (candidates by default)."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # A task of the form "convai2:Teacher:no_cands" disables label
        # candidates; any parse failure falls back to using candidates.
        try:
            use_cands = opt['task'].split(":")[2] != 'no_cands'
        except Exception:
            use_cands = True
        opt['datafile'] = _path(opt, 'self_original', use_cands)
        super().__init__(opt, shared)
class SelfTeacher(SelfOriginalTeacher):
    # Alias kept so the short task name 'self' keeps working.
    pass
class SelfRevisedTeacher(FbDialogTeacher):
    """Teacher over the 'self_revised' persona files (candidates by default)."""

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # A task of the form "convai2:Teacher:no_cands" disables label
        # candidates; any parse failure falls back to using candidates.
        try:
            use_cands = opt['task'].split(":")[2] != 'no_cands'
        except Exception:
            use_cands = True
        opt['datafile'] = _path(opt, 'self_revised', use_cands)
        super().__init__(opt, shared)
class DefaultTeacher(SelfOriginalTeacher):
    # The default task teacher is the original self-persona variant.
    pass
import torch
from torch import nn
class TokenCrossEntropyLoss(nn.Module):
    """Summed token-level cross entropy that ignores padding positions."""

    def __init__(self, pad_index=0):
        super().__init__()
        self.pad_index = pad_index
        self.base_loss_function = nn.CrossEntropyLoss(reduction='sum', ignore_index=pad_index)

    def forward(self, outputs, targets, keep_batch=False):
        """Return the summed loss, or per-example sums when ``keep_batch``.

        :param outputs: (batch, seq_len, vocab) logits
        :param targets: (batch, seq_len) gold token ids
        :param keep_batch: when True, return a (batch,) tensor of per-example
            summed losses instead of one scalar over the whole batch.
        """
        n_batch, n_steps, n_vocab = outputs.size()
        if keep_batch:
            per_example = [
                self.base_loss_function(logits, labels.view(-1)).view(-1)
                for logits, labels in zip(outputs, targets)
            ]
            return torch.cat(per_example)
        flat_logits = outputs.contiguous().view(n_batch * n_steps, n_vocab)
        flat_labels = targets.contiguous().view(n_batch * n_steps)
        return self.base_loss_function(flat_logits, flat_labels)
class LabelSmoothingLoss(nn.Module):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """
    def __init__(self, label_smoothing, vocabulary_size, pad_index=0):
        """
        :param label_smoothing: probability mass in (0, 1] spread over
            non-gold tokens.
        :param vocabulary_size: size of the output vocabulary.
        :param pad_index: token id excluded from both targets and smoothing.
        """
        assert 0.0 < label_smoothing <= 1.0
        super(LabelSmoothingLoss, self).__init__()
        self.pad_index = pad_index
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.criterion = nn.KLDivLoss(reduction='sum')
        smoothing_value = label_smoothing / (vocabulary_size - 2)  # exclude pad and true label
        smoothed_targets = torch.full((vocabulary_size,), smoothing_value)
        smoothed_targets[self.pad_index] = 0
        # Registered as a buffer so it moves with the module across devices.
        self.register_buffer('smoothed_targets', smoothed_targets.unsqueeze(0))  # (1, vocabulary_size)
        self.confidence = 1.0 - label_smoothing
    def forward(self, outputs, targets):
        """
        outputs (FloatTensor): (batch_size, seq_len, vocabulary_size)
        targets (LongTensor): (batch_size, seq_len)
        """
        batch_size, seq_len, vocabulary_size = outputs.size()
        outputs_log_softmax = self.log_softmax(outputs)
        outputs_flat = outputs_log_softmax.contiguous().view(batch_size * seq_len, vocabulary_size)
        targets_flat = targets.contiguous().view(batch_size * seq_len)
        # Expand the template distribution to one row per target token.
        smoothed_targets = self.smoothed_targets.repeat(targets_flat.size(0), 1)
        # smoothed_targets: (batch_size * seq_len, vocabulary_size)
        # Put the remaining confidence mass on the gold token (in place).
        smoothed_targets.scatter_(1, targets_flat.unsqueeze(1), self.confidence)
        # smoothed_targets: (batch_size * seq_len, vocabulary_size)
        # Zero out rows whose target is padding so they contribute no loss.
        smoothed_targets.masked_fill_((targets_flat == self.pad_index).unsqueeze(1), 0)
        # masked_targets: (batch_size * seq_len, vocabulary_size)
        loss = self.criterion(outputs_flat, smoothed_targets)
        # count = (targets != self.pad_index).sum().item()
        return loss
import numpy as np
def _calculate_expect_softmax_loss(score_view, ys):
denominator = np.exp(score_view).sum(-1)
numerator = []
for i, y in enumerate(ys):
numerator.append(np.exp(score_view[i][y]).reshape(1, ))
numerator = np.concatenate(numerator, axis=0)
out = numerator / denominator
return np.mean(- np.log(out))
def padding(_xs, max_1d_size, max_2d_size, null_idx):
    """In-place pad: grow ``_xs`` to ``max_1d_size`` rows and every row to
    ``max_2d_size`` entries of ``null_idx``."""
    while len(_xs) < max_1d_size:
        _xs.append([])
    for row in _xs:
        row.extend([null_idx] * (max_2d_size - len(row)))
def padding4d(_xs, max_1d_size, max_2d_size, max_3d_size, null_idx):
    """In-place pad a three-level nested list to the given sizes with ``null_idx``."""
    while len(_xs) < max_1d_size:
        _xs.append([])
    for plane in _xs:
        while len(plane) < max_2d_size:
            plane.append([])
    for plane in _xs:
        for row in plane:
            row.extend([null_idx] * (max_3d_size - len(row)))
def split_pad_vector(xs, separator, null_idx):
    """
    Split every encoded dialog on ``separator`` and pad to a rectangle.

    :param xs: batch of token-id sequences
    :param separator: token id that ends one utterance (END token)
    :param null_idx: padding token id, dropped while splitting
    :return: batch of [utterance][token] lists, padded with ``null_idx``
    """
    def split_one(seq):
        parts = []
        current = []
        for token in seq:
            if token == separator:
                parts.append(current)
                current = []
                continue
            if token != null_idx:
                current.append(token)
        if len(current):
            parts.append(current)
        return parts

    split_batch = [split_one(seq) for seq in xs]
    max_turns = max(len(dialog) for dialog in split_batch)
    max_words = max((len(turn) for dialog in split_batch for turn in dialog), default=0)
    for dialog in split_batch:
        padding(dialog, max_turns, max_words, null_idx)
    return split_batch
def split_pad_vector_for_bug(xs, separator, null_idx):
    """
    Use the splitor to split the sentences.
    spliter is the value that represents END TOKEN
    :param x: input
    :param separator: the required seperator
    :return: a list of dialogs after splitting and padding
    """
    # NOTE(review): the hard-coded ids below (40483, 40484, 40478-40481)
    # presumably correspond to the GPT BPE special tokens added in
    # GPTDictionaryAgent -- confirm before changing the vocabulary.
    # coherent send
    def split_40483(x):
        _xs = []
        temp_x = []
        for _x in x:
            if _x == 40483:
                # Only keep the segment if it ends with the 40484 marker.
                if temp_x[-1] == 40484:
                    _xs.append(temp_x)
                temp_x = []
                continue
            if _x != null_idx:
                temp_x.append(_x)
        if len(temp_x):
            _xs.append(temp_x)
        return _xs
    # language model send
    def split_40484(x):
        _xs = []
        temp_x = []
        for _x in x:
            # Skip GPT special tokens entirely.
            if _x in [40483, 40478, 40479, 40480, 40481]:
                continue
            if _x == separator:
                _xs.append(temp_x)
                temp_x = []
                continue
            if _x != null_idx:
                temp_x.append(_x)
        if len(temp_x):
            _xs.append(temp_x)
        return _xs
    def get_max_words_size(_xs):
        # Longest utterance length across the whole batch.
        max_size = 0
        for agent in _xs:
            for dialog in agent:
                if len(dialog) > max_size:
                    max_size = len(dialog)
        return max_size
    # Choose the splitting strategy based on which separator was requested.
    if 40483 == separator:
        xs = [split_40483(x) for x in xs]
    else:
        xs = [split_40484(x) for x in xs]
    max_turn_size = max((len(x) for x in xs))
    max_words_size = get_max_words_size(xs)
    for agent in xs:
        padding(agent, max_turn_size, max_words_size, null_idx)
    return xs
def reverse_padding(xs, PAD_IDX=0):
    """
    Move trailing padding to the front of each sequence.

    :param xs: encoded dialogs (list of equal-length rows, or tensor rows)
    :param PAD_IDX: the index of the __NULL__ token
    Examples
    --------
    >>> xs = [[3, 1, 2, 0, 0],
    ...       [2, 1, 4, 0, 0]]
    >>> reverse_padding(xs, 0)
    [[0, 0, 3, 1, 2],
     [0, 0, 2, 1, 4]]
    """
    if not isinstance(xs, list):
        # Convert tensor-like rows into plain Python lists.
        xs = [[x for x in ex] for ex in xs]
    if len(xs) == 0:
        return xs
    width = len(xs[0])
    result = []
    for row in xs:
        # Find the index just past the last non-pad token.
        cut = width - 1
        for cut in range(width - 1, -1, -1):
            if row[cut] != PAD_IDX:
                break
        cut += 1
        result.append([PAD_IDX] * (width - cut) + row[:cut])
    return result
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.dict import DictionaryAgent
import torch
try:
from pytorch_pretrained_bert import OpenAIGPTTokenizer
except ImportError:
raise ImportError('please ensure that pytorch-pretrained-BERT installed. \n '
'pip install pytorch-pretrained-bert')
from .dict_helper import SpecialToken
import os
def recover_bpe_encoding(bpe_tokens):
    """Merge BPE word-pieces back into words; '</w>' marks a word boundary."""
    words = []
    pieces = ''
    for piece in bpe_tokens:
        if '</w>' in piece:
            # End of a word: flush the accumulated pieces.
            words.append(pieces + piece.replace('</w>', ''))
            pieces = ''
        else:
            pieces += piece.strip()
    if pieces != '':
        words.append(pieces)
    return words
class GPTDictionaryAgent(DictionaryAgent):
    """ParlAI dictionary backed by the Hugging Face OpenAI-GPT BPE tokenizer.

    The parent DictionaryAgent's token tables are cleared and repopulated
    with only the special tokens; all real tokenisation is delegated to
    ``OpenAIGPTTokenizer``.
    """
    # Class-level defaults consumed by DictionaryAgent's option handling.
    default_lang = 'english'
    default_maxngram = -1
    default_minfreq = 0
    default_maxtokens = -1
    default_null = SpecialToken.pad
    default_start = SpecialToken.start
    default_end = SpecialToken.end
    default_unk = SpecialToken.unk
    default_tok = 'bpe'
    default_lower = True
    default_textfields = 'text,labels'
    def __init__(self, opt):
        super().__init__(opt)
        # initialize from vocab cache path under datapath
        cache_vocab_dir = os.path.join(opt['datapath'], 'from_pretrained')
        self.special_tokens = [SpecialToken.talk_1_start,
                               SpecialToken.talk_1_end,
                               SpecialToken.persona_start,
                               SpecialToken.persona_end,
                               SpecialToken.no_fact,
                               SpecialToken.start,
                               SpecialToken.end,
                               SpecialToken.slice_sym]
        # add special token after the pre-trained bpe text
        self.tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt',
                                                            cache_dir=cache_vocab_dir,
                                                            special_tokens=self.special_tokens)
        self.start_token = self.default_start
        self.end_token = self.default_end
        self.null_token = self.default_null
        # <unk> already in the dictionary
        self.start_idx = self.tokenizer.convert_tokens_to_ids([SpecialToken.start])[0]
        # <end> is used to split a long text into different parts, which is necessary for us
        # to differentiate persona & history only passing the observation function for one time
        self.end_idx = self.tokenizer.convert_tokens_to_ids([SpecialToken.end])[0]
        self.pad_idx = self.tokenizer.convert_tokens_to_ids([SpecialToken.pad])[0]  # should be 0
        # update for default tokenizer vocabulary
        self.tok2ind.clear()
        self.ind2tok.clear()
        # set tok2ind for special tokens
        for special_token in self.special_tokens + [self.start_token, self.end_token, self.null_token]:
            token_id = self.tokenizer.convert_tokens_to_ids([special_token])[0]
            self.tok2ind[special_token] = token_id
            self.ind2tok[token_id] = special_token
    def txt2vec(self, text, vec_type=list):
        """Tokenise ``text`` with the GPT BPE tokenizer and return token ids."""
        tokens = self.tokenizer.tokenize(text)
        tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)
        return tokens_id
    def vec2txt(self, tensor_list, recover_bpe=False):
        """Decode a list of token ids (or 0-d tensors) to a space-joined string.

        :param recover_bpe: when True, merge BPE pieces back into whole words.
        """
        if isinstance(tensor_list[0], torch.Tensor):
            idxs = [idx.cpu().item() for idx in tensor_list]
        else:
            idxs = list(tensor_list)
        # Out-of-vocabulary ids are replaced with padding before decoding.
        max_vocab_size = len(self.tokenizer.decoder) + len(self.special_tokens)
        idxs = [self.pad_idx if idx >= max_vocab_size else idx for idx in idxs]
        toks = self.tokenizer.convert_ids_to_tokens(idxs)
        if recover_bpe:
            toks = recover_bpe_encoding(toks)
        # toks = ['{:>5}'.format(i) for i in toks]
        return ' '.join(toks)
    def vec2words(self, tensor_list, recover_bpe=False):
        """Decode token ids to a list of tokens with '</w>' markers removed."""
        if isinstance(tensor_list[0], torch.Tensor):
            idxs = [idx.cpu().item() for idx in tensor_list]
        else:
            idxs = list(tensor_list)
        # Out-of-vocabulary ids are replaced with padding before decoding.
        max_vocab_size = len(self.tokenizer.decoder) + len(self.special_tokens)
        idxs = [self.pad_idx if idx >= max_vocab_size else idx for idx in idxs]
        toks = self.tokenizer.convert_ids_to_tokens(idxs)
        if recover_bpe:
            toks = recover_bpe_encoding(toks)
        # toks = ['{:>5}'.format(i) for i in toks]
        return [i.replace('</w>', '') for i in toks]
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.dict import DictionaryAgent
from parlai.zoo.bert.build import download
try:
from pytorch_pretrained_bert import BertTokenizer
except ImportError:
raise ImportError('BERT rankers needs pytorch-pretrained-BERT installed. \n '
'pip install pytorch-pretrained-bert')
import os
VOCAB_PATH = 'bert-base-uncased-vocab.txt'
class BertDictionaryAgent(DictionaryAgent):
    """ParlAI dictionary backed by the Hugging Face BERT wordpiece tokenizer."""
    # Class-level defaults consumed by DictionaryAgent's option handling.
    default_lang = 'english'
    default_maxngram = -1
    default_minfreq = 0
    default_maxtokens = -1
    default_null = '[PAD]'
    default_start = '[CLS]'
    default_end = '[SEP]'
    # NOTE(review): unk is mapped to '[PAD]' rather than '[UNK]' -- confirm
    # this is intentional.
    default_unk = '[PAD]'
    default_tok = 're'
    default_lower = True
    default_textfields = 'text,labels'
    def __init__(self, opt):
        super().__init__(opt)
        # initialize from vocab path (downloads the BERT model if missing)
        download(opt['datapath'])
        vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models',
                                  VOCAB_PATH)
        self.tokenizer = BertTokenizer.from_pretrained(vocab_path)
        self.start_token = '[CLS]'
        self.end_token = '[SEP]'
        self.null_token = '[PAD]'
        self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[
            0]  # should be 101
        self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[
            0]  # should be 102
        self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0]  # should be 0
        # set tok2ind for special tokens
        self.tok2ind[self.start_token] = self.start_idx
        self.tok2ind[self.end_token] = self.end_idx
        self.tok2ind[self.null_token] = self.pad_idx
        # set ind2tok for special tokens
        self.ind2tok[self.start_idx] = self.start_token
        self.ind2tok[self.end_idx] = self.end_token
        self.ind2tok[self.pad_idx] = self.null_token
    def txt2vec(self, text, vec_type=list):
        """Tokenise ``text`` with the BERT wordpiece tokenizer and return ids."""
        tokens = self.tokenizer.tokenize(text)
        tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)
        return tokens_id
    def vec2txt(self, tensor):
        """Decode a tensor of token ids back to a space-joined token string."""
        idxs = [idx.item() for idx in tensor.cpu()]
        toks = self.tokenizer.convert_ids_to_tokens(idxs)
        return ' '.join(toks)
from torch.nn import CrossEntropyLoss
from transformers import T5ForConditionalGeneration
from transformers.modeling_outputs import Seq2SeqLMOutput
import torch
class FiDT5(T5ForConditionalGeneration):
    """
    Modification of T5 model that can use Fusion-in-Decoder method
    """
    def __init__(self, config):
        super().__init__(config)
        # When True, forward() flattens/unflattens the passage axis so the
        # decoder attends over all passages at once (Fusion-in-Decoder).
        # NOTE(review): a FiD pass also requires self.bsz / self.n_passages /
        # self.passage_length to be set by the caller — confirm callers keep
        # these in sync (do_forward in in_model.py does).
        self.do_fid = False
    def make_fid_encoder(self):
        # Wrap the stock encoder so generate() applies the same FiD reshaping.
        self.encoder = FiDEncoder(self.encoder)
    def make_base_encoder(self):
        # Undo make_fid_encoder(): restore the wrapped stock encoder.
        self.encoder = self.encoder.encoder
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        encoder_outputs=None,
        labels=None,
        use_cache=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Seq2seq forward pass (see T5ForConditionalGeneration.forward).

        When ``self.do_fid`` is set, each batch element carries N passages that
        are encoded independently and whose encoder states are concatenated
        along the sequence axis before decoding.
        """
        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            # If we use Fusion-in-Decoder method
            # Change shape to (B * N) * L
            # B - Batch size
            # N - Number of passages
            # L - Length of passages
            if self.do_fid:
                input_ids = input_ids.view(self.bsz * self.n_passages, -1)
                if attention_mask is not None:
                    attention_mask = attention_mask.view(self.bsz * self.n_passages, -1)
            # Convert encoder inputs in embeddings if needed
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            # Change shape back to B * (N * L) if used FiD method
            if self.do_fid:
                encoder_outputs.last_hidden_state = encoder_outputs.last_hidden_state.view(self.bsz, self.n_passages * self.passage_length, -1)
                if attention_mask is not None:
                    attention_mask = attention_mask.view(self.bsz, self.n_passages * self.passage_length)
        hidden_states = encoder_outputs[0]
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)
        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            use_cache=use_cache,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = decoder_outputs[0]
        if self.config.tie_word_embeddings:
            # Rescale before the vocab projection (tied-embedding convention).
            sequence_output = sequence_output * (self.model_dim**-0.5)
        lm_logits = self.lm_head(sequence_output)
        loss = None
        if labels is not None:
            # -100 label positions are ignored (HF padding convention).
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=hidden_states,
            encoder_hidden_states=None,
            encoder_attentions=None,
        )
class FiDEncoder(torch.nn.Module):
    """
    T5 Encoder that uses Fusion-in-Decoder method
    """
    def __init__(self, encoder):
        super().__init__()
        # The wrapped stock T5 encoder.
        self.encoder = encoder
        # Set externally together with bsz / n_passages / passage_length before
        # a FiD pass — TODO confirm callers keep these in sync.
        self.do_fid = False
        # Mirror the wrapped encoder's HF metadata so generate() still works.
        self.config = self.encoder.config
        self.main_input_name = self.encoder.main_input_name
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Encode, flattening/unflattening the passage axis when do_fid is set."""
        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            # If we use Fusion-in-Decoder method
            # Change shape to (B * N) * L
            # B - Batch size
            # N - Number of passages
            # L - Length of passages
            if self.do_fid:
                input_ids = input_ids.view(self.bsz * self.n_passages, -1)
                if attention_mask is not None:
                    attention_mask = attention_mask.view(self.bsz * self.n_passages, -1)
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            # Change shape back to B * (N * L) if used FiD method
            if self.do_fid:
                encoder_outputs.last_hidden_state = encoder_outputs.last_hidden_state.view(self.bsz, self.n_passages * self.passage_length, -1)
                if attention_mask is not None:
                    attention_mask = attention_mask.view(self.bsz, self.n_passages * self.passage_length)
        return encoder_outputs
    def get_input_embeddings(self):
        # Delegate to the wrapped encoder's embedding table.
        return self.encoder.embed_tokens
    def set_input_embeddings(self, new_embeddings):
        self.encoder.embed_tokens = new_embeddings
import os
import bitsandbytes as bnb
import pytorch_lightning as pl
import torch
import torchmetrics
from fid import FiDT5
from peft import (prepare_model_for_kbit_training, LoraConfig,
get_peft_model, TaskType, PeftModel)
from transformers import (GPT2Tokenizer,
get_cosine_schedule_with_warmup, BitsAndBytesConfig)
from utils import SearchModule, compute_metrics, log_metrics, print_trainable_parameters
# Dialogue control tokens (kept in Russian — the model is trained on Russian
# data). They delimit persona descriptions, task markers and result sections
# in the serialized dialogue text.
PERSONA1 = '<Персона пользователя 1>'
PERSONA2 = '<Персона пользователя 2>'
SEARCH_TASK = '<Выполнить поиск>'
KNOWLEDGE_TASK = '<Извлечь знания>'
RESPONSE_TASK = '<Сгенерировать реплику>'
NO_QUERY = '<Нет запроса>'
NO_KNOWLEDGE = '<Нет знаний>'
KNOWLEDGE = '<Знания>'
USER1_REPLY = '<Реплика пользователя 1>'
USER2_REPLY = '<Реплика пользователя 2>'
KNOWLEDGE_RESULT = '<Итоговые знания>'
SEARCH_RESULT = '<Итоговый поиск>'
RESPONSE_RESULT = '<Итоговая реплика>'
# FRED-T5 probably should work fine without special tokens
# Tokens registered with the tokenizer so they are never split into pieces.
ATTR_TO_SPECIAL_TOKEN = {
    'additional_special_tokens': [
        PERSONA1, PERSONA2,
        USER1_REPLY, USER2_REPLY,
        SEARCH_TASK, KNOWLEDGE_TASK, RESPONSE_TASK,
        KNOWLEDGE, NO_KNOWLEDGE, NO_QUERY,
        KNOWLEDGE_RESULT, SEARCH_RESULT, RESPONSE_RESULT
    ]
}
class InternetModel(pl.LightningModule):
    """Lightning wrapper around a 4-bit-quantized, LoRA-tuned FiD-T5 that is
    trained on three tasks at once: search-query generation, knowledge
    extraction (Fusion-in-Decoder over passages) and response generation.
    """

    def __init__(
        self,
        model_dir,
        model_name,
        save_name,
        labels_max_length
    ):
        """Load the base FiD-T5 (NF4 quantized) and its tokenizer.

        :param model_dir: folder containing locally stored pretrained models
        :param model_name: local folder name or Huggingface Hub model id
        :param save_name: name under ``models/`` where the LoRA adapter lives
        :param labels_max_length: generation budget (max new tokens)
        """
        super().__init__()
        model_path = os.path.join(model_dir, model_name)
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )
        if os.path.exists(model_path):
            # Load from local path
            self.M = FiDT5.from_pretrained(model_path, quantization_config=bnb_config, device_map="auto")
            self.tokenizer = GPT2Tokenizer.from_pretrained(model_path, eos_token="</s>", truncation_side='left')
        else:
            # Load from Huggingface Hub
            self.M = FiDT5.from_pretrained(model_name, quantization_config=bnb_config, device_map="auto")
            self.tokenizer = GPT2Tokenizer.from_pretrained(model_name, eos_token="</s>", truncation_side='left')
        self.search_module = SearchModule()
        self.lora_name = os.path.join("models", save_name)
        self.metrics = torchmetrics.MetricCollection(
            {
                'BLEU-1': torchmetrics.BLEUScore(n_gram=1),
                'BLEU-2': torchmetrics.BLEUScore(n_gram=2)
            }
        )
        self.labels_max_length = labels_max_length
        # Fix: training_step reads self.use_cache, which was never initialized
        # anywhere (AttributeError on the first training step). Default to
        # None so the underlying model decides; callers may still override it.
        self.use_cache = None
        # Cache ids of the special control tokens used by the three tasks.
        self.no_search_id = self.tokenizer.convert_tokens_to_ids(NO_QUERY)
        self.search_task_id = self.tokenizer.convert_tokens_to_ids(SEARCH_TASK)
        self.knowledge_task_id = self.tokenizer.convert_tokens_to_ids(KNOWLEDGE_TASK)
        self.response_task_id = self.tokenizer.convert_tokens_to_ids(RESPONSE_TASK)
        self.search_result_id = self.tokenizer.convert_tokens_to_ids(SEARCH_RESULT)
        self.knowledge_result_id = self.tokenizer.convert_tokens_to_ids(KNOWLEDGE_RESULT)
        self.response_result_id = self.tokenizer.convert_tokens_to_ids(RESPONSE_RESULT)

    def set_training_args(self, args):
        """Copy optimizer/scheduler hyper-parameters from the parsed CLI args."""
        self.lr = args.lr
        self.weight_decay = args.weight_decay
        self.num_warmup_steps = args.num_warmup_steps
        self.num_training_steps = args.num_training_steps
        self.batch_size = args.batch_size

    def add_lora_adapter(self):
        """
        Adds LoRA Adapter to the base model
        """
        self.M.gradient_checkpointing_enable()
        self.M = prepare_model_for_kbit_training(self.M)
        config = LoraConfig(
            r=8,
            lora_alpha=32,
            target_modules=["q", "v", "wi", "wo"],
            lora_dropout=0.1,
            bias="none",
            task_type=TaskType.SEQ_2_SEQ_LM
        )
        self.M = get_peft_model(self.M, config)
        print_trainable_parameters(self.M)

    def load_lora_adapter(self):
        """
        Loads trained LoRA Adapter for inference
        """
        self.M = PeftModel.from_pretrained(self.M, self.lora_name)

    def configure_optimizers(self):
        """Paged 8-bit AdamW with cosine warmup schedule (stepped per batch)."""
        optimizer = bnb.optim.PagedAdamW8bit(
            self.M.parameters(), lr=self.lr, weight_decay=self.weight_decay
        )
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.num_warmup_steps,
            num_training_steps=self.num_training_steps
        )
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def training_step(self, batch, batch_idx):
        """One optimization step over the tasks present in the batch.

        The loss is the mean of the per-task losses that were computed.
        Fix: the original wrapped this body in a stray ``for i in batch:``
        loop whose loop variable was never used while ``batch`` was still
        indexed directly; removed (validation_step/test_step show the
        intended flat structure).
        """
        search_ids, search_mask, search_labels = batch[0], batch[1], batch[2]
        knowledge_ids, knowledge_mask, knowledge_labels = batch[3], batch[4], batch[5]
        response_ids, response_mask, response_labels = batch[6], batch[7], batch[8]
        # Do search task
        if search_ids is not None:
            batch_size = search_ids.shape[0]
            s_loss, _, _ = self.do_forward(
                "Search_train",
                search_ids,
                search_mask,
                search_labels,
                use_cache=self.use_cache
            )
        # Do knowledge task
        if knowledge_ids is not None:
            batch_size = knowledge_ids.shape[0]
            k_loss, _, _ = self.do_forward(
                "Knowledge_train",
                knowledge_ids,
                knowledge_mask,
                knowledge_labels,
                use_cache=self.use_cache,
                do_fid=True
            )
        # Do response task
        if response_ids is not None:
            batch_size = response_ids.shape[0]
            r_loss, _, _ = self.do_forward(
                "Response_train",
                response_ids,
                response_mask,
                response_labels,
                use_cache=self.use_cache
            )
        # Average the losses of the tasks that were actually present.
        loss = 0.
        count = 0
        if search_ids is not None:
            loss += s_loss
            count += 1
        if knowledge_ids is not None:
            loss += k_loss
            count += 1
        if response_ids is not None:
            loss += r_loss
            count += 1
        loss /= count
        logs = []
        logs.append({
            'name': 'lr',
            'value': self.trainer.optimizers[0].param_groups[0]["lr"],
            'on_epoch': False, 'on_step': True})
        if search_ids is not None:
            logs.append({'name': 'train_s_loss', 'value': s_loss, 'on_epoch': True, 'on_step': True})
        if knowledge_ids is not None:
            logs.append({'name': 'train_k_loss', 'value': k_loss, 'on_epoch': True, 'on_step': True})
        if response_ids is not None:
            logs.append({'name': 'train_r_loss', 'value': r_loss, 'on_epoch': True, 'on_step': True})
        log_metrics(self, logs, batch_size=batch_size)
        return loss

    def validation_step(self, batch, batch_idx, dataloader_idx=0):
        """Teacher-forced validation pass per task with BLEU/F1 metrics.

        A decoded sample per task is appended to ``out.txt`` for inspection.
        """
        search_ids, search_mask, search_labels = batch[0], batch[1], batch[2]
        knowledge_ids, knowledge_mask, knowledge_labels = batch[3], batch[4], batch[5]
        response_ids, response_mask, response_labels = batch[6], batch[7], batch[8]
        # Do search task
        if search_ids is not None:
            s_loss, s_metrics, ans = self.do_forward(
                "Search_eval",
                search_ids,
                search_mask,
                search_labels,
                do_compute_bleu=True,
            )
            with open("out.txt", "a", encoding='utf-8') as w:
                w.write(" " + self.tokenizer.decode(ans[0]) + "\n")
        # Do knowledge task
        if knowledge_ids is not None:
            k_loss, k_metrics, ans = self.do_forward(
                "Knowledge_eval",
                knowledge_ids,
                knowledge_mask,
                knowledge_labels,
                do_compute_bleu=True,
                do_fid=True
            )
            with open("out.txt", "a", encoding='utf-8') as w:
                w.write(" " + self.tokenizer.decode(ans[0]) + "\n")
        # Do response task
        if response_ids is not None:
            r_loss, r_metrics, ans = self.do_forward(
                "Response_eval",
                response_ids,
                response_mask,
                response_labels,
                do_compute_bleu=True,
                # do_fid=True
            )
            with open("out.txt", "a", encoding='utf-8') as w:
                w.write(" " + self.tokenizer.decode(ans[0]) + "\n")
        # NOTE(review): unlike training_step, the summed loss is not divided
        # by ``count``; the returned value is not logged, so this is benign.
        loss = 0.
        count = 0
        if search_ids is not None:
            loss += s_loss
            count += 1
        if knowledge_ids is not None:
            loss += k_loss
            count += 1
        if response_ids is not None:
            loss += r_loss
            count += 1
        logs = []
        log_dicts = []
        if search_ids is not None:
            logs.append({'name': 'eval_s_loss', 'value': s_loss, 'on_epoch': True, 'on_step': True})
            log_dicts.append({'value': s_metrics, 'on_epoch': True, 'on_step': True})
        if knowledge_ids is not None:
            logs.append({'name': 'eval_k_loss', 'value': k_loss, 'on_epoch': True, 'on_step': True})
            log_dicts.append({'value': k_metrics, 'on_epoch': True, 'on_step': True})
        if response_ids is not None:
            logs.append({'name': 'eval_r_loss', 'value': r_loss, 'on_epoch': True, 'on_step': True})
            log_dicts.append({'value': r_metrics, 'on_epoch': True, 'on_step': True})
        log_metrics(self, logs, log_dicts)
        return loss

    def test_step(self, batch, batch_idx, dataloader_idx=0):
        """Free-running generation per task, scored with BLEU/F1.

        Generation is forced to start with the task's result token; the
        response task uses contrastive search (penalty_alpha/top_k).
        """
        search_ids, search_mask, search_labels = batch[0], batch[1], batch[2]
        knowledge_ids, knowledge_mask, knowledge_labels = batch[3], batch[4], batch[5]
        response_ids, response_mask, response_labels = batch[6], batch[7], batch[8]
        # Do search task
        if search_ids is not None:
            _, s_metrics, ans = self.do_forward(
                "Search_test",
                search_ids,
                search_mask,
                search_labels,
                do_generation=True,
                do_compute_bleu=True,
                forced_token=self.search_result_id
            )
            with open("out.txt", "a", encoding='utf-8') as w:
                w.write(" " + self.tokenizer.decode(ans[0]) + "\n")
        # Do knowledge task
        if knowledge_ids is not None:
            _, k_metrics, ans = self.do_forward(
                "Knowledge_test",
                knowledge_ids,
                knowledge_mask,
                knowledge_labels,
                do_fid=True,
                do_generation=True,
                do_compute_bleu=True,
                forced_token=self.knowledge_result_id
            )
            with open("out.txt", "a", encoding='utf-8') as w:
                w.write(" " + self.tokenizer.decode(ans[0]) + "\n")
        # Do response task
        if response_ids is not None:
            _, r_metrics, ans = self.do_forward(
                "Response_test",
                response_ids,
                response_mask,
                response_labels,
                do_generation=True,
                do_compute_bleu=True,
                penalty_alpha=0.6,
                top_k=4,
                forced_token=self.response_result_id
            )
            with open("out.txt", "a", encoding='utf-8') as w:
                w.write(" " + self.tokenizer.decode(ans[0]) + "\n")
        log_dicts = []
        if search_ids is not None:
            log_dicts.append({'value': s_metrics, 'on_epoch': True, 'on_step': True})
        if knowledge_ids is not None:
            log_dicts.append({'value': k_metrics, 'on_epoch': True, 'on_step': True})
        if response_ids is not None:
            log_dicts.append({'value': r_metrics, 'on_epoch': True, 'on_step': True})
        log_metrics(self, log_dicts=log_dicts)

    def do_forward(
        self,
        step_name,
        input_ids=None,
        attention_mask=None,
        labels=None,
        use_cache=None,
        do_fid=False,
        do_compute_bleu=False,
        do_generation=False,
        penalty_alpha=None,
        top_k=None,
        forced_token=None
    ):
        """Run one forward (or generate) pass for a single task.

        :param step_name: label used to suffix metric names
        :param do_fid: treat input_ids as (B, N, L) and fuse passages
        :param do_generation: use generate() instead of a teacher-forced pass
        :param forced_token: token id forced at decoder position 1
        :returns: tuple ``(loss, metrics_or_None, predicted_token_ids)``
        """
        # Propagate FiD bookkeeping onto the (PEFT-wrapped) base model.
        M = self.M.get_base_model()
        M.do_fid = do_fid
        M.encoder.do_fid = do_fid
        if do_fid:
            M.bsz = input_ids.shape[0]
            M.n_passages = input_ids.shape[1]
            M.passage_length = input_ids.shape[2]
            M.encoder.bsz = input_ids.shape[0]
            M.encoder.n_passages = input_ids.shape[1]
            M.encoder.passage_length = input_ids.shape[2]
        if do_generation:
            if forced_token is not None:
                # HF forced_decoder_ids format: [[position, token_id]].
                forced_token = [[1, forced_token]]
            answer = self.M.generate(
                input_ids=input_ids.view(input_ids.size(0), -1),
                max_new_tokens=self.labels_max_length,
                penalty_alpha=penalty_alpha,
                top_k=top_k,
                no_repeat_ngram_size=3,
                early_stopping=True,
                forced_decoder_ids=forced_token
            )
            loss = 0.
        else:
            out = self.M(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                use_cache=use_cache
            )
            loss, logits = out.loss, out.logits
            # NOTE(review): replacing a NaN loss tensor with a plain float
            # effectively skips backprop for this batch — confirm intent.
            if loss.isnan():
                loss = 1e-3
            answer = torch.argmax(logits, dim=-1)
        if do_compute_bleu:
            metrics, f1 = compute_metrics(self, answer, labels, generation=do_generation)
            metrics = {k + f" {step_name}": metrics[k] for k in metrics}
            metrics[f"F1 {step_name}"] = f1
            return loss, metrics, answer
        return loss, None, answer
import os
from argparse import ArgumentParser
import in_model as im
import torch
from datasets import Dataset
from transformers import AutoModel, AutoTokenizer
from tqdm.auto import tqdm
def add_cmdline_args():
    """Build the inference CLI and return the parsed argument namespace."""
    parser = ArgumentParser()
    # (flag, type, default, help) — kept identical to preserve the CLI contract.
    specs = [
        ('--save_name', str, 'f-t5-big-internet',
         'Path where trained LoRA adapter should be placed'),
        ('--retriever_name', str, 'rubert-base-retriever',
         'Name of Retriever model'),
        ('--model_name', str, 'f-t5-big',
         'Model name. Can be a name from Huggingface Hub'),
        ('--model_dir', str, 'pretrained_models',
         'Path to folder with pretrained model'),
        ('--passage_max_length', int, 128,
         'Maximum length of passages in knowledge task'),
        ('--labels_max_length', int, 128,
         'Maximum length of labels in tokens'),
        ('--passage_count', int, 5,
         "Number of passages per 1 context sample in knowledge task"),
    ]
    for flag, arg_type, default, description in specs:
        parser.add_argument(flag, type=arg_type, default=default, help=description)
    return parser.parse_args()
def predict(
    context,
    personas,
    model,
    retriever,
    r_tokenizer,
    passage_max_length,
    passage_count
):
    """
    Make the full model's process from getting the search query
    to getting the final response
    """
    search_query = get_search_query(model, context, personas)
    print(search_query)
    if im.NO_QUERY in search_query:
        # The model decided no external information is needed.
        knowledge = im.NO_KNOWLEDGE
    else:
        knowledge_passages = get_data_from_external_sources(
            model, retriever, r_tokenizer,
            search_query, context,
            passage_max_length, passage_count
        )
        knowledge = extract_knowledge(model, knowledge_passages, context)
    print(knowledge)
    return get_response(model, context, personas, knowledge)
def get_response(
    model,
    context,
    personas,
    knowledge
):
    """
    Generate the final response

    Uses contrastive search (penalty_alpha=0.6, top_k=4) on the prompt
    ``context + personas + knowledge`` terminated by the response-task token.
    """
    prompt = "<LM>" + context + personas + knowledge + im.RESPONSE_TASK
    input_ids = model.tokenizer(prompt, return_tensors='pt').input_ids.to(model.M.device)
    _, _, generated = model.do_forward(
        "Inference response",
        input_ids=input_ids,
        do_fid=False,
        do_generation=True,
        penalty_alpha=0.6,
        top_k=4
    )
    return model.tokenizer.decode(generated[0], skip_special_tokens=True)
def extract_knowledge(
    model,
    knowledge_passages,
    context
):
    """
    Extract knowledge

    Note: the knowledge-task token is appended to the caller's last passage
    in place (the list is mutated).
    """
    knowledge_passages[-1] += im.KNOWLEDGE_TASK
    prompts = ["<LM>" + context + passage for passage in knowledge_passages]
    encoded = model.tokenizer(
        prompts,
        padding=True,
        truncation=True,
        return_tensors='pt'
    )
    # Add a leading batch dim: FiD expects (B, N_passages, L).
    passage_ids = encoded.input_ids.to(model.M.device).unsqueeze(0)
    _, _, generated = model.do_forward(
        "Inference knowledge",
        input_ids=passage_ids,
        do_fid=True,
        do_generation=True
    )
    return model.tokenizer.decode(generated[0], skip_special_tokens=True)
def get_data_from_external_sources(
    model,
    retriever,
    tokenizer,
    search_query,
    context,
    passage_max_length,
    passage_count
):
    """
    Get data from the Internet and long-term memory.
    Retrieve the most relevant candidates from it.
    """
    search_result = model.search_module.do_search(search_query)
    passage_ids = tokenizer(
        search_result,
        max_length=passage_max_length,
        padding=True,
        truncation=True,
        return_tensors='pt'
    ).input_ids.to(retriever.device)
    context_ids = tokenizer(
        "<LM>" + context,
        max_length=2 * passage_max_length,
        truncation=True,
        return_tensors='pt'
    ).input_ids.to(retriever.device)
    # best_ids: (1, k, L) tensor of top Internet passages;
    # memory_texts: raw texts retrieved from long-term memory.
    best_ids, memory_texts = get_relevant_passages(
        passage_ids, context_ids,
        retriever, passage_count
    )
    decoded = [
        tokenizer.decode(ids, skip_special_tokens=True)
        for ids in best_ids[0]
    ]
    return decoded + memory_texts
def get_search_query(
    model,
    context,
    personas
):
    """
    Generate the search query
    """
    prompt = "<LM>" + context + personas + im.SEARCH_TASK
    input_ids = model.tokenizer(prompt, return_tensors='pt').input_ids.to(model.M.device)
    _, _, generated = model.do_forward(
        "Inference search",
        input_ids=input_ids,
        do_generation=True
    )
    return model.tokenizer.decode(generated[0], skip_special_tokens=True)
def get_relevant_passages(
    passage_ids,
    context_ids,
    retriever,
    top_N
):
    """
    Retrieve top_N most relevant texts from
    the Internet search and long-term memory
    """
    print(passage_ids.shape)
    query_emb = retriever(input_ids=context_ids).pooler_output
    # Embed every Internet passage one at a time (memory-friendly).
    passage_embs = None
    for row in tqdm(range(0, passage_ids.shape[0])):
        emb = retriever(input_ids=passage_ids[row].unsqueeze(0)).pooler_output
        passage_embs = emb if passage_embs is None else torch.cat((passage_embs, emb))
    scores = torch.nn.functional.log_softmax(
        torch.mm(query_emb, passage_embs.transpose(0, 1)), dim=-1
    )
    score, idx = torch.topk(scores, min(passage_ids.shape[0], top_N))
    print(score)
    # Score long-term-memory embeddings, if any were loaded.
    if retriever.without_memory:
        texts = []
    else:
        mem_scores = torch.nn.functional.log_softmax(
            torch.mm(query_emb, retriever.memory.transpose(0, 1)), dim=-1
        )
        score, mem_idx = torch.topk(mem_scores, min(retriever.memory.shape[0], top_N))
        print(score)
        texts = [retriever.texts[i] for i in mem_idx.detach().cpu().numpy()[0]]
    return passage_ids[idx], texts
def load_from_memory(retriever):
    """
    Load data from memory source

    Attaches three attributes to *retriever*: ``without_memory`` (flag),
    ``memory`` (float tensor of stored embeddings, moved to the retriever's
    device) and ``texts`` (the corresponding raw texts). When no embeddings
    file exists, only the flag is set.
    """
    embeddings_path = "retriever/data/memory/embeddings.csv"
    if not os.path.exists(embeddings_path):
        retriever.without_memory = True
    else:
        retriever.without_memory = False
        # Fix: reuse the path that was just checked instead of re-typing the
        # literal (the original duplicated the file name, inviting drift).
        data = Dataset.from_csv(embeddings_path)
        retriever.memory = torch.from_numpy(data.to_pandas().to_numpy()).to(torch.float)
        retriever.memory = retriever.memory.to(retriever.device)
        retriever.texts = Dataset.from_csv("retriever/data/memory/texts.csv")['0']
def main(args):
    """Load the generator + retriever and run an interactive chat loop until
    the user types 'stop'."""
    model = im.InternetModel(
        args.model_dir,
        args.model_name,
        args.save_name,
        args.labels_max_length
    )
    model.load_lora_adapter()
    model.M.make_fid_encoder()
    model.M.eval()
    retriever = AutoModel.from_pretrained(os.path.join("retriever/models", args.retriever_name))
    r_tokenizer = AutoTokenizer.from_pretrained(os.path.join("retriever/models", args.retriever_name))
    retriever.eval()
    # retriever.to(device)
    load_from_memory(retriever)
    context = ""
    # NOTE(review): both persona lists are empty here, so the persona section
    # of the prompt is effectively blank — confirm whether they should be
    # populated or exposed as CLI options.
    persona1 = []
    persona2 = []
    print("Your persona:")
    [print(" " + i) for i in persona1]
    print("_" * 15)
    print("Bot persona:")
    [print(" " + i) for i in persona2]
    print("_" * 15)
    personas = im.PERSONA1 + im.PERSONA1.join(persona1) + im.PERSONA2 + im.PERSONA2.join(persona2)
    user_in = input("User: ")
    while user_in != "stop":
        # Dialogue history alternates user/bot turns delimited by reply tokens.
        context += im.USER1_REPLY + user_in + '\n'
        response = predict(
            context,
            personas,
            model,
            retriever,
            r_tokenizer,
            args.passage_max_length,
            args.passage_count
        )
        print(f" Bot: {response}")
        context += im.USER2_REPLY + response + '\n'
        user_in = input("User: ")
if __name__ == "__main__":
    # Parse CLI options and start the interactive chat loop.
    args = add_cmdline_args()
    main(args)
import os
from argparse import ArgumentParser
import dataset
import pytorch_lightning as pl
import torch
import torch.utils.data
from model import Retriever
from pytorch_lightning import loggers as pl_loggers
def add_cmdline_args():
    """Build the retriever-training CLI and return the parsed namespace."""
    parser = ArgumentParser()
    # (flag, type, default, help) — kept identical to preserve the CLI contract.
    specs = [
        ('--save_name', str, 'rubert-base-retriever', 'Save path'),
        ('--model_dir', str, '../pretrained_models',
         'Path where the model is placed'),
        ('--model_name', str, 'rubert-base',
         'Model name. Can be the model name from Huggingface Hub'),
        ('--batch_size', int, 6, 'Batch size used during training'),
        ('--num_workers', int, 8, 'Number of workers in dataloaders'),
        ('--num_epochs', int, 3, 'Total number of epochs during trinaing'),
        ('--accumulation_steps', int, 8,
         'Number of accumulation steps during training'),
        ('--context_max_length', int, 32, 'Maximum length of context in tokens'),
        ('--passage_max_length', int, 32,
         'Maximum length of each candidate in tokens'),
        ('--lr', float, 5e-5, 'Learning rate'),
        ('--weight_decay', float, 1e-4, ''),
        ('--num_warmup_steps', int, 100, ''),
    ]
    for flag, arg_type, default, description in specs:
        parser.add_argument(flag, type=arg_type, default=default, help=description)
    return parser.parse_args()
def main(args):
    """Assemble SberQuAD + MIRACL + WizInt datasets, train the Retriever
    bi-encoder, and save model + tokenizer under ``models/<save_name>``."""
    torch.set_float32_matmul_precision('high')
    data_dir = os.path.join(os.curdir, 'data')
    sbquad_path = 'sberquad'
    miracl_path = 'miracl'
    wizint_path = '../internet_model/data/wizint_rus'
    save_dir = os.path.join(os.curdir, 'models')
    save_path = os.path.join(save_dir, args.save_name)
    train_sbquad = dataset.InternetDataset(
        os.path.join(data_dir, sbquad_path, 'train.jsonl'))
    valid_sbquad = dataset.InternetDataset(
        os.path.join(data_dir, sbquad_path, 'valid.jsonl'))
    train_miracl = dataset.InternetDataset(
        os.path.join(data_dir, miracl_path, 'train.jsonl'))
    valid_miracl = dataset.InternetDataset(
        os.path.join(data_dir, miracl_path, 'valid.jsonl'))
    train_wizint = dataset.InternetDataset(os.path.join(wizint_path, 'train_f.jsonl'), wizint_data=True)
    # WizInt validation split is currently disabled (kept for reference).
    """valid_wizint = dataset.InternetDataset(
        os.path.join(wizint_path, 'valid.jsonl'),
        wizint_data=True)"""
    train = torch.utils.data.ConcatDataset([train_wizint, train_sbquad, train_miracl])
    valid = torch.utils.data.ConcatDataset([valid_sbquad, valid_miracl])
    # Total optimizer steps, accounting for gradient accumulation.
    args.num_training_steps = len(train) * args.num_epochs // (args.accumulation_steps * args.batch_size) + 1
    model = Retriever(
        args.model_dir,
        args.model_name
    )
    model.set_training_params(
        args.lr,
        args.weight_decay,
        args.num_warmup_steps,
        args.num_training_steps,
        args.context_max_length,
        args.passage_max_length
    )
    # NOTE(review): DataLoader calls collate_fn(batch) with one positional
    # argument, so the lambda's first default (data=train) is always replaced
    # by the batch; the remaining defaults just freeze tokenizer and lengths.
    train_loader = torch.utils.data.DataLoader(
        train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        collate_fn=lambda data=train, tokenizer=model.tokenizer,
        context_max_length=args.context_max_length,
        passage_max_length=args.passage_max_length:
        dataset.collate_fn(
            data,
            tokenizer,
            context_max_length,
            passage_max_length
        )
    )
    valid_loader = torch.utils.data.DataLoader(
        valid,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=lambda data=valid, tokenizer=model.tokenizer,
        context_max_length=args.context_max_length,
        passage_max_length=args.passage_max_length: dataset.collate_fn(data, tokenizer, context_max_length, passage_max_length)
    )
    version = f"Train Retriever {args.model_name}. \
        Context length={args.context_max_length}. \
        Passage length={args.passage_max_length}"
    logger = pl_loggers.TensorBoardLogger(save_dir="logs/", version=version)
    trainer = pl.Trainer(
        accelerator='gpu',
        accumulate_grad_batches=args.accumulation_steps,
        devices=1,
        enable_checkpointing=False,
        logger=logger,
        precision="16-mixed",
        max_epochs=args.num_epochs
    )
    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=valid_loader)
    model.M.save_pretrained(save_path)
    model.tokenizer.save_pretrained(save_path)
if __name__ == "__main__":
    # Parse CLI options and launch retriever training.
    args = add_cmdline_args()
    main(args)
import os
import pytorch_lightning as pl
import torch
import torchmetrics
from transformers import (BertModel, AutoTokenizer,
get_cosine_schedule_with_warmup)
# Dialogue control tokens (kept in Russian — the models are trained on Russian
# data). They delimit personas, task markers and result sections.
PERSONA1 = '<Персона пользователя 1>'
PERSONA2 = '<Персона пользователя 2>'
SEARCH_TASK = '<Выполнить поиск>'
KNOWLEDGE_TASK = '<Извлечь знания>'
RESPONSE_TASK = '<Сгенерировать реплику>'
NO_QUERY = '<Нет запроса>'
NO_KNOWLEDGE = '<Нет знаний>'
KNOWLEDGE = '<Знания>'
USER1_REPLY = '<Реплика пользователя 1>'
USER2_REPLY = '<Реплика пользователя 2>'
KNOWLEDGE_RESULT = '<Итоговые знания>'
SEARCH_RESULT = '<Итоговый поиск>'
RESPONSE_RESULT = '<Итоговая реплика>'
QUESTION = '<Вопрос>'
CANDIDATE = '<Кандидат>'
# Registered with the tokenizer so they are never split into wordpieces.
# NOTE(review): QUESTION and CANDIDATE are not in this list — confirm that is
# intentional.
ATTR_TO_SPECIAL_TOKEN = {
    'additional_special_tokens': [
        PERSONA1, PERSONA2,
        USER1_REPLY, USER2_REPLY,
        SEARCH_TASK, KNOWLEDGE_TASK, RESPONSE_TASK,
        KNOWLEDGE, NO_KNOWLEDGE, NO_QUERY,
        KNOWLEDGE_RESULT, SEARCH_RESULT, RESPONSE_RESULT
    ]
}
class Retriever(pl.LightningModule):
    """BERT bi-encoder dense retriever trained with an in-batch softmax loss.

    Queries and passages are encoded independently; the [CLS] embedding is
    used as the sequence representation and scored by dot product.
    """

    def __init__(
        self,
        model_dir,
        model_name
    ):
        """Load the BERT encoder + tokenizer and register the special tokens.

        :param model_dir: folder containing locally stored pretrained models
        :param model_name: local folder name or Huggingface Hub model id
        """
        super().__init__()
        model_path = os.path.join(model_dir, model_name)
        if os.path.exists(model_path):  # Load from local path
            self.M = BertModel.from_pretrained(model_path)
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        else:  # Load from Huggingface Hub
            self.M = BertModel.from_pretrained(model_name)
            # Fix: the original loaded the tokenizer from ``model_path`` here,
            # which cannot exist in this branch (that is exactly why we fell
            # through to the Hub). Use the Hub id, matching the model load.
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN)
        # Grow the embedding matrix to cover the newly added special tokens.
        self.M.resize_token_embeddings(len(self.tokenizer))
        self.metrics = torchmetrics.MetricCollection(
            {
                'Top@1': TopKAccuracy(topk=1),
                'Top@5': TopKAccuracy(topk=5),
                'Top@20': TopKAccuracy(topk=20),
                'Top@30': TopKAccuracy(topk=30),
            }
        )
        # NOTE(review): the loss is applied to log-softmax scores below, i.e.
        # CrossEntropyLoss receives log-probabilities instead of raw logits —
        # confirm this double-normalization is intentional.
        self.loss_fn = torch.nn.CrossEntropyLoss()
        self.save_hyperparameters(ignore=["M"])

    def set_training_params(
        self,
        lr,
        weight_decay,
        num_warmup_steps,
        num_training_steps,
        context_max_length,
        passage_max_length
    ):
        """Store optimizer/scheduler and tokenization hyper-parameters."""
        self.lr = lr
        self.weight_decay = weight_decay
        self.num_warmup_steps = num_warmup_steps
        self.num_training_steps = num_training_steps
        self.context_max_length = context_max_length
        self.passage_max_length = passage_max_length

    def configure_optimizers(self):
        """AdamW with cosine warmup schedule, stepped per batch."""
        optimizer = torch.optim.AdamW(
            self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.num_warmup_steps,
            num_training_steps=self.num_training_steps
        )
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def dot(self, x, y):
        """Pairwise dot-product similarity: (B, H) x (B, H) -> (B, B)."""
        return torch.mm(x, y.transpose(0, 1))

    def _shared_step(self, batch, split):
        """Encode queries/passages, compute loss and Top@K metrics.

        :param batch: (q_ids, q_mask, p_ids, p_mask, labels)
        :param split: prefix for metric names ("Train" / "Validation")
        :returns: ``(loss, metrics_dict)``
        """
        q_ids, q_mask = batch[0], batch[1]
        p_ids, p_mask = batch[2], batch[3]
        labels = batch[4]
        # [CLS] (position 0) embedding serves as the sequence representation.
        q_pred = self.M(
            input_ids=q_ids,
            attention_mask=q_mask
        ).last_hidden_state[:, 0]
        p_pred = self.M(
            input_ids=p_ids,
            attention_mask=p_mask
        ).last_hidden_state[:, 0]
        softmax_score = torch.nn.functional.log_softmax(self.dot(q_pred, p_pred), dim=-1)
        loss = self.loss_fn(softmax_score, labels)
        accuracy = self.metrics(softmax_score, labels.long())
        return loss, {f"{split} {k}": accuracy[k] for k in accuracy}

    def training_step(self, batch, batch_idx):
        """One training step; logs learning rate, loss and Top@K accuracy."""
        loss, accuracy = self._shared_step(batch, "Train")
        self.log(
            name='lr',
            value=self.trainer.optimizers[0].param_groups[0]["lr"],
            on_epoch=False, on_step=True
        )
        self.log(
            name='Train loss',
            value=loss,
            on_epoch=True, on_step=True
        )
        self.log_dict(
            accuracy,
            on_epoch=True, on_step=True
        )
        return loss

    def validation_step(self, batch, batch_idx):
        """One validation step; logs loss and Top@K accuracy."""
        loss, accuracy = self._shared_step(batch, "Validation")
        self.log(
            name='Validation loss',
            value=loss,
            on_epoch=True, on_step=True
        )
        self.log_dict(
            accuracy,
            on_epoch=True, on_step=True
        )
        return loss
class TopKAccuracy(torchmetrics.Metric):
    """Fraction of positive targets whose score ranks in the per-row top-k.

    NOTE(review): ``update`` asserts ``preds.shape == target.shape``, i.e. it
    expects *target* to be a one-hot / multi-hot matrix the same shape as the
    score matrix — confirm callers never pass plain class indices.
    """
    def __init__(self, topk=1):
        super().__init__()
        # States are summed across processes in distributed runs.
        self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
        self.topk = topk
    def update(self, preds: torch.Tensor, target: torch.Tensor):
        assert preds.shape == target.shape
        if self.topk < preds.shape[1]:
            _, pred = preds.topk(self.topk, dim=1, largest=True, sorted=True)
            # Count positives whose column falls among the top-k scores per row.
            self.correct += (target * torch.zeros_like(target).scatter(1, pred[:, :self.topk], 1)).sum()
        else:
            # k >= number of candidates: every positive is trivially in the top-k.
            self.correct += target.sum()
        # Denominator is the total number of positives seen.
        self.total += target.sum()
    def compute(self):
        return self.correct.float() / self.total
import asyncio
from abc import ABCMeta, abstractmethod
from contextlib import suppress
from enum import Enum, unique
from urllib.parse import urlencode, urlparse
from bs4 import BeautifulSoup
from search.exceptions import NoResultsOrTrafficError
from .utils import CacheHandler, get_rand_user_agent
@unique
class ReturnType(Enum):
    """Selector for which field(s) of each search result a parser returns."""
    FULL = "full"
    TITLE = "titles"
    DESCRIPTION = "descriptions"
    LINK = "links"
# Each returned result is a SearchItem within the overall search results
class SearchItem(dict):
    """
    A single parsed search result.

    Behaves like a plain dict of result fields (``titles``, ``links``,
    ``descriptions`` plus engine-specific keys), but also resolves the
    singular spelling of a key: ``item["description"]`` falls back to
    ``item["descriptions"]``.
    >>> result["description"]
    Some description
    >>> result["descriptions"]
    Same description
    """

    def __getitem__(self, value):
        """Look the key up as-is; on a miss, retry with a trailing 's'."""
        try:
            return dict.__getitem__(self, value)
        except KeyError:
            pass
        key = value if value.endswith('s') else value + 's'
        return dict.__getitem__(self, key)
class SearchResult():
    """
    Container for all results of one search.

    Supports indexing by position (returns the individual ``SearchItem``)
    and by field name (returns a list of that field gathered across every
    result), e.g. ``results[0]`` or ``results["descriptions"]``.  Results
    that lack the requested field are silently skipped.
    """

    def __init__(self):
        self.results = []

    def append(self, value):
        """Add one parsed result (a SearchItem / dict)."""
        self.results.append(value)

    def __getitem__(self, value):
        """Index by int position, or by field name across all results."""
        if isinstance(value, int):
            return self.results[value]
        links = []
        for x in self.results:
            with suppress(KeyError):
                links.append(x[value])
        return links

    def keys(self):
        # The first result's keys are taken as representative; an empty
        # result set yields an empty dict's keys.
        keys = {}
        with suppress(IndexError):
            x = self.results[0]
            keys = x.keys()
        return keys

    def __len__(self):
        return len(self.results)

    def __repr__(self):
        # BUG FIX: was misspelled ``__repr_`` and therefore never invoked.
        return "<SearchResult: {} results>".format(len(self.results))
class BaseSearch:
    # NOTE(review): ``__metaclass__`` is Python-2 syntax with no effect on
    # Python 3; the @abstractmethod decorators below are only enforced via
    # the explicit NotImplementedError raises.
    __metaclass__ = ABCMeta
    """
    Search base to be extended by search parsers
    Every subclass must have two methods `search` and `parse_single_result`
    """
    # Summary of engine
    summary = None
    # Search Engine Name
    name = None
    # Search Engine unformatted URL
    search_url = None
    # The url after all query params have been set
    _parsed_url = None
    # boolean that indicates cache hit or miss
    _cache_hit = False
    @abstractmethod
    def parse_soup(self, soup):
        """
        Defines the results contained in a soup
        """
        raise NotImplementedError("subclasses must define method <parse_soup>")
    @abstractmethod
    def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):
        """
        Every div/span containing a result is passed here to retrieve
        `title`, `link` and `descr`
        """
        raise NotImplementedError(
            "subclasses must define method <parse_results>")
    def get_cache_handler(self):
        """ Return Cache Handler to use"""
        return CacheHandler()
    @property
    def cache_handler(self):
        # A fresh handler per access; engines may override get_cache_handler.
        return self.get_cache_handler()
    def parse_result(self, results, **kwargs):
        """
        Runs every entry on the page through parse_single_result
        :param results: Result of main search to extract individual results
        :type results: list[`bs4.element.ResultSet`]
        :returns: dictionary. Containing lists of titles, links, descriptions and other possible\
        returns.
        :rtype: dict
        """
        search_results = SearchResult()
        for each in results:
            rdict = self.parse_single_result(each, **kwargs)
            # Engines return None for entries that should be discarded.
            if rdict is not None:
                search_results.append(rdict)
        return search_results
    def get_params(self, query=None, page=None, offset=None, **kwargs):
        """ This function should be overwritten to return a dictionary of query params"""
        return {'q': query, 'page': page}
    def headers(self):
        # Default request headers with a randomized User-Agent per call.
        headers = {
            "Cache-Control": 'no-cache',
            "Connection": "keep-alive",
            "User-Agent": get_rand_user_agent()
        }
        return headers
    def clear_cache(self, all_cache=False):
        """
        Triggers the clear cache function for a particular engine
        :param all_cache: if True, deletes for all engines
        """
        if all_cache:
            return self.cache_handler.clear()
        return self.cache_handler.clear(self.name)
    async def get_source(self, url, cache=True, proxy=None, proxy_auth=None):
        """
        Returns the source code of a webpage.
        Also sets the _cache_hit if cache was used
        :rtype: string
        :param url: URL to pull it's source code
        :param proxy: proxy address to make use of
        :type proxy: str
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :type proxy_auth: (str, str)
        :return: html source code of a given URL.
        """
        try:
            html, cache_hit = await self.cache_handler.get_source(self.name, url, self.headers(), cache, proxy, proxy_auth)
        except Exception as exc:
            raise Exception('ERROR: {}\n'.format(exc))
        self._cache_hit = cache_hit
        return html
    async def get_soup(self, url, cache, proxy, proxy_auth):
        """
        Get the html soup of a query
        :param url: url to obtain soup from
        :type url: str
        :param cache: cache request or not
        :type cache: bool
        :param proxy: proxy address to make use of
        :type proxy: str
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :type proxy_auth: (str, str)
        :rtype: `bs4.element.ResultSet`
        """
        html = await self.get_source(url, cache, proxy, proxy_auth)
        return BeautifulSoup(html, 'lxml')
    def get_search_url(self, query=None, page=None, **kwargs):
        """
        Return a formatted search url
        """
        # Some URLs use offsets
        offset = (page * 10) - 9
        params = self.get_params(
            query=query, page=page, offset=offset, **kwargs)
        url = urlparse(self.search_url)
        # For localization purposes, custom urls can be parsed for the same engine
        # such as google.de and google.com
        if kwargs.get("url"):
            new_url = urlparse(kwargs.pop("url"))
            # When passing url without scheme e.g google.de, url is parsed as path
            if not new_url.netloc:
                url = url._replace(netloc=new_url.path)
            else:
                url = url._replace(netloc=new_url.netloc)
        self.base_url = url.geturl()
        self._parsed_url = url._replace(query=urlencode(params))
        return self._parsed_url.geturl()
    def get_results(self, soup, **kwargs):
        """ Get results from soup"""
        search_results = None
        results = self.parse_soup(soup)
        # TODO Check if empty results is caused by traffic or answers to query
        # were not found
        if not results:
            print("ENGINE FAILURE: {}\n".format(self.name))
            raise NoResultsOrTrafficError(
                "The result parsing was unsuccessful. It is either your query could not be found"
                " or it was flagged as unusual traffic")
        try:
            search_results = self.parse_result(results, **kwargs)
        # AttributeError occurs as it cannot pass the returned soup
        except AttributeError:
            raise NoResultsOrTrafficError(
                "The returned results could not be parsed. This might be due to site updates or "
                "server errors. Drop an issue at https://github.com/bisoncorps/search-engine-parser"
                " if this persists"
            )
        return search_results
    def search(self, query=None, page=1, cache=True, proxy=None, proxy_auth=None, **kwargs):
        """
        Query the search engine
        :param query: the query to search for
        :type query: str
        :param page: Page to be displayed, defaults to 1
        :type page: int
        :param proxy: proxy address to make use of
        :type proxy: str
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :type proxy_auth: (str, str)
        :return: dictionary. Containing titles, links, netlocs and descriptions.
        """
        # Pages can only be from 1-N
        if page <= 0:
            page = 1
        # Get search Page Results
        # NOTE(review): asyncio.get_event_loop() is deprecated for
        # synchronous callers on newer Python versions — consider
        # asyncio.run(); verify the supported Python range first.
        loop = asyncio.get_event_loop()
        url = self.get_search_url(query, page, **kwargs)
        soup = loop.run_until_complete(
            self.get_soup(url,
                          cache=cache,
                          proxy=proxy,
                          proxy_auth=proxy_auth))
        return self.get_results(soup, **kwargs)
    async def async_search(self, query=None, page=1, cache=True, proxy=None, proxy_auth=None, **kwargs):
        """
        Query the search engine but in async mode
        :param query: the query to search for
        :type query: str
        :param page: Page to be displayed, defaults to 1
        :type page: int
        :param proxy: proxy address to make use of
        :type proxy: str
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :type proxy_auth: (str, str)
        :return: dictionary. Containing titles, links, netlocs and descriptions.
        """
        # Pages can only be from 1-N
        if page <= 0:
            page = 1
        soup = await self.get_soup(self.get_search_url(query, page, **kwargs), cache=cache, proxy=proxy, proxy_auth=proxy_auth)
        return self.get_results(soup, **kwargs)
from search.base import BaseSearch, ReturnType, SearchItem
class Search(BaseSearch):
    """
    Searches DuckDuckGo for string
    """
    name = "DuckDuckGo"
    base_url = "https://www.duckduckgo.com"
    search_url = "https://www.duckduckgo.com/html/?"
    summary = "\tHas a number of advantages over the other search engines. \n\tIt has a clean " \
        "interface, it does not track users, it is not fully loaded with ads and has a number " \
        "of very nice features (only one page of results, you can search directly other web " \
        "sites etc).\n\tAccording to DuckDuckGo traffic stats [December, 2018], they are " \
        "currently serving more than 30 million searches per day."

    def get_params(self, query=None, page=None, offset=None, **kwargs):
        """Map generic query/page/offset onto DuckDuckGo's HTML endpoint params."""
        params = {}
        params["q"] = query
        # DuckDuckGo paginates via an absolute result offset "s".
        params["s"] = 0 if (page < 2) else (((page - 1) * 50) - 20)
        params["dc"] = offset
        params["o"] = "json"
        params["api"] = "d.js"
        return params

    def parse_soup(self, soup):
        """
        Parses DuckDuckGo Search Soup for a query results
        """
        # find all div tags
        return soup.find_all('div', class_='result')

    def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):
        """
        Parses the source code to return

        :param single_result: single result found in <div id="r1-{id}">
        :type single_result: `bs4.element.ResultSet`
        :return: parsed title, link and description of single result, or
            None when a requested link could not be extracted
        :rtype: dict
        """
        rdict = SearchItem()
        if return_type in (ReturnType.FULL, ReturnType.TITLE):
            h2 = single_result.find(
                'h2', class_="result__title")  # pylint: disable=invalid-name
            # Get the text and link
            rdict["titles"] = h2.text.strip()
        if return_type in (ReturnType.FULL, ReturnType.LINK):
            link_tag = single_result.find('a', class_="result__a")
            if link_tag is not None:
                rdict["links"] = link_tag.get('href')
            else:
                rdict['links'] = None
        if return_type in (ReturnType.FULL, ReturnType.DESCRIPTION):
            desc = single_result.find(class_='result__snippet')
            if desc is not None:
                rdict["descriptions"] = desc.text
            else:
                rdict["descriptions"] = ""
        # BUG FIX: the old unconditional ``rdict['links']`` lookup raised a
        # KeyError for TITLE/DESCRIPTION-only requests, where no 'links'
        # key is ever set.  Discard the result only when a link was
        # requested but missing.
        if rdict.get('links', '') is None:
            rdict = None
        return rdict
import re
from search.base import BaseSearch, ReturnType, SearchItem
class Search(BaseSearch):
    """
    Searches Yahoo for string
    """
    name = "Yahoo"
    search_url = "https://search.yahoo.com/search?"
    summary = "\tYahoo is one the most popular email providers and holds the fourth place in "\
        "search with 3.90% market share.\n\tFrom October 2011 to October 2015, Yahoo search "\
        "was powered exclusively by Bing. \n\tSince October 2015 Yahoo agreed with Google to "\
        "provide search-related services and since then the results of Yahoo are powered both "\
        "by Google and Bing. \n\tYahoo is also the default search engine for Firefox browsers "\
        "in the United States (since 2014)."

    def get_params(self, query=None, page=None, offset=None, **kwargs):
        """Map generic query/offset onto Yahoo's search endpoint params."""
        params = {}
        params["p"] = query
        params["b"] = offset
        return params

    def parse_soup(self, soup):
        """
        Parses Yahoo for a search query
        """
        # find all divs
        return soup.find_all('div', class_='Sr')

    def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):
        """
        Parses the source code to return

        :param single_result: single result found in <div class="Sr">
        :type single_result: `bs4.element.ResultSet`
        :return: parsed title, link and description of single result
        :rtype: dict
        """
        rdict = SearchItem()
        h3_tag = single_result.find('h3', class_='title')
        if return_type in (ReturnType.FULL, ReturnType.TITLE):
            title = h3_tag.text
            rdict["titles"] = title
        if return_type in (ReturnType.FULL, ReturnType.LINK):
            link_tag = h3_tag.find('a')
            raw_link = link_tag.get('href')
            # Yahoo wraps the real target inside a redirect URL of the
            # form ".../RU=<percent-encoded-url>/RK=...".
            matches = re.findall("/RU=(.+)/RK", raw_link)
            if matches:
                link = matches[0].replace("%3a", ":").replace("%2f", "/")
            else:
                # ROBUSTNESS FIX: the old unconditional ``[0]`` raised an
                # IndexError whenever the href was not a redirect wrapper;
                # fall back to the raw href instead.
                link = raw_link
            rdict["links"] = link
        if return_type in (ReturnType.FULL, ReturnType.DESCRIPTION):
            desc = single_result.find('span', class_='fc-falcon')
            # ROBUSTNESS FIX: mirror the DuckDuckGo engine — a missing
            # snippet yields an empty description instead of crashing.
            rdict["descriptions"] = desc.text if desc is not None else ""
        return rdict
import requests
from web3 import Web3
import json
import numpy as np
import urllib.parse as urlparse
import time
from ..utils import helper
from ..web3_utils import web3_globals, web3_generic
def is_list_of_addresses(self, address_list: str):
    '''
    web3_generic method: validate a comma separated list of addresses

    Parameters
    --------
    address_list : str
        comma separated list of addresses

    Returns:
    --------
    bool
        False if at least one address in address list is not a valid Web3 Address
    '''
    # NOTE(review): ``self`` is unused — this reads like a method pasted to
    # module level; the parameter is kept for backward compatibility.
    # (Also fixed: the local variable used to shadow the builtin ``list``.)
    for item in address_list.split(','):
        if not Web3.isAddress(item):
            return False
    return True
def get_lp_pool_of_contract(connection, schema, w3:Web3,chain_id:int,contract_address:str):
    """Return the UniswapV2-style liquidity-pool (pair) address of
    *contract_address* against WETH/WBNB on the given chain.

    ``connection``/``schema`` are forwarded to the DB-backed ABI cache.
    """
    # Resolve the factory contract's ABI, then ask the factory for the
    # WETH/WBNB <-> token pair address.
    swap_contract_factory_abi = web3_generic.getABI(connection, schema, w3.toChecksumAddress(web3_globals.AddrUniSwapV2FactoryAdress),chain_id)
    swap_contract_factory_instance = w3.eth.contract(address=w3.toChecksumAddress(web3_globals.AddrUniSwapV2FactoryAdress), abi=swap_contract_factory_abi)
    WETH_contract_address = w3.toChecksumAddress(web3_globals.get_address_weth_or_wbnb(chain_id,w3))
    lp_contract_address = swap_contract_factory_instance.functions.getPair(WETH_contract_address,w3.toChecksumAddress(contract_address)).call()
    return lp_contract_address
def get_contract_info(connection, schema, w3:Web3, chain_id:int, contract_address:str, function_name:str, abi:str=None, args:list=[], blockNumber=None):
    """Call a read-only contract function and return its result.

    :param connection/schema: forwarded to the DB-backed ABI cache
    :param abi: optional pre-fetched ABI; when None it is looked up via getABI
    :param args: positional arguments for the contract function (never mutated)
    :param blockNumber: optional historic block to query at; only honoured
        when args are supplied, mirroring the original call paths
    """
    if abi is None:
        # BUG FIX: getABI requires (connection, schema, address, chain_id);
        # the old call omitted connection/schema and raised a TypeError.
        contract_abi = web3_generic.getABI(connection, schema, w3.toChecksumAddress(contract_address), chain_id)
    else:
        contract_abi = abi
    contract_instance = w3.eth.contract(address=w3.toChecksumAddress(contract_address), abi=contract_abi)
    function_instance = contract_instance.functions[function_name]
    if args == []:
        return function_instance().call()
    if blockNumber is not None:
        return function_instance(*args).call(block_identifier=blockNumber)
    return function_instance(*args).call()
def gini(array):
    """Calculate the Gini coefficient of a numpy array.

    Based on the bottom equation of
    http://www.statsdirect.com/help/content/image/stat0206_wmf.gif
    (see http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm).
    """
    # BUG FIX: work on a flattened float64 copy — the in-place float
    # adjustments below used to fail on integer arrays, and the caller's
    # data is now guaranteed untouched.
    array = np.asarray(array, dtype=np.float64).flatten()
    if np.amin(array) < 0:
        array -= np.amin(array)  # values cannot be negative
    array += 0.0000001  # values cannot be 0
    array = np.sort(array)  # values must be sorted
    index = np.arange(1, array.shape[0] + 1)  # index per array element
    n = array.shape[0]  # number of array elements
    return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))  # Gini coefficient
def get_token_holder(contract_address: str, covalent_api_key: str, max_working_time_sec: int):
    """Page through Covalent's token-holder endpoint and aggregate supply,
    holder count and the Gini coefficient of the holdings.

    Stops paging once the projected runtime would exceed
    *max_working_time_sec*, so the summed figures may be partial for very
    large tokens (the ``*_api`` fields always reflect the API's own totals).

    :param contract_address: token contract to inspect (Ethereum mainnet)
    :param covalent_api_key: Covalent API key used for every request
    :param max_working_time_sec: soft time budget for the paging loop
    :returns: dict of aggregates, or [] when the API response is empty
    """
    headers = {
        'Content-Type': 'application/json'
    }
    # BUG FIX: the caller-supplied covalent_api_key was unconditionally
    # overwritten by a hard-coded key here; the parameter is honoured now.
    match = {"balance": {"$gt": 0}}  # only holders with a positive balance
    sort = {"balance": -1}           # largest holders first
    total_supply = 0
    total_supply_minus_burn = 0
    total_supply_api = 0
    token_holder = 0
    token_holder_api = 0
    holdings = []
    page = 0
    begin = time.time()
    while True:
        url = 'https://api.covalenthq.com/v1/1/tokens/{0}/token_holders/?page-number={1}&page-size=1000&match={2}&sort={3}&key={4}'.format(contract_address,page,urlparse.quote_plus(json.dumps(match)),urlparse.quote_plus(json.dumps(sort)),covalent_api_key)
        begin_loop = time.time()
        response = requests.request('GET',url,headers=headers)
        result = json.loads(response.text)
        total_supply_api = int(result['data']['items'][0]['total_supply'])
        token_holder_api = int(result['data']['pagination']['total_count'])
        if len(result) == 0:
            return []
        for item in result['data']['items']:
            # Addresses with an all-zero prefix are treated as burn wallets:
            # they count toward supply but not toward holders/holdings.
            if item['address'][0:20] == '0x000000000000000000':
                total_supply += int(item['balance'])
            else:
                total_supply += int(item['balance'])
                total_supply_minus_burn += int(item['balance'])
                token_holder += 1
                holdings.append(float(item['balance']))
        end_loop = time.time()
        # Stop if one more page (estimated at the last page's duration)
        # would push us past the time budget.
        if (end_loop - begin_loop) + (end_loop - begin) > max_working_time_sec:
            break
        elif not result['data']['pagination']['has_more']:
            break
        else:
            page += 1
    gini_coefficient = gini(np.array(holdings))
    return {
        'token_holder_api': token_holder_api,
        'token_holder_count': token_holder,
        'total_supply_api': total_supply_api,
        'total_supply_sum': total_supply,
        'total_supply_sum_minus_burn': total_supply_minus_burn,
        'gini_coefficient': gini_coefficient
    }
#get_token_holder('0x96150e34f8b56b59a53c2caab4510edb3085d070','ckey_5625cf364c744bcb8116949ce76',20)
def get_initial_lp_pool_reserves(connection, schema, w3:Web3,chain_id:int,lp_contract_address:str,starting_block:str,ending_block:str):
    """Fetch the first Sync event of a UniswapV2-style pair within a block
    range and return its reserves split into WETH/WBNB vs. token side.

    :returns: dict with keys 'WETH' and 'Token' (raw integer reserves)
    """
    headers = {
        'Content-Type': 'application/json'
    }
    # NOTE(review): API key is hard-coded here (unlike get_token_holder,
    # which takes it as a parameter) — consider parameterizing.
    covalent_api_key = 'ckey_5625cf364c744bcb8116949ce76'
    #sort = {"block_height": -1, "log_offset": -1}
    # Event topic hash filtered for below — presumably the UniswapV2
    # Sync(reserve0, reserve1) event; TODO confirm against the pair ABI.
    topic = '0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1'
    #url = "https://api.covalenthq.com/v1/1/events/topics/{0}/?starting-block={1}&ending-block=13487690&sender-address={2}&page-size=5&sort={3}&key={4}".format(topic,starting_block,lp_contract_address,urlparse.quote_plus(json.dumps(sort)),covalent_api_key)
    url = "https://api.covalenthq.com/v1/1/events/topics/{0}/?starting-block={1}&ending-block={2}&sender-address={3}&page-size=5&key={4}".format(topic,starting_block,ending_block,lp_contract_address,covalent_api_key)
    response = requests.request('GET',url,headers=headers)
    result = json.loads(response.text)
    reserves = [0,0]
    # Decode reserve0/reserve1 from the first matching event's params.
    reserves_raw = result['data']['items'][0]['decoded']['params']
    for r in reserves_raw:
        if r['name'] == "reserve0":
            reserves[0] = int(r['value'])
        else:
            reserves[1] = int(r['value'])
    result = {}
    # Ask the pair which token is token0 so the reserves can be labeled.
    lp_contract_abi = web3_generic.getABI(connection, schema, w3.toChecksumAddress(lp_contract_address),chain_id)
    lp_contract_instance = w3.eth.contract(address=w3.toChecksumAddress(lp_contract_address), abi=lp_contract_abi)
    token0_address = lp_contract_instance.functions.token0().call()
    if token0_address == web3_globals.get_address_weth_or_wbnb(chain_id,w3):
        result.update({
            'WETH': reserves[0],
            'Token': reserves[1]
        })
    else:
        result.update({
            'WETH': reserves[1],
            'Token': reserves[0]
        })
    return result
# w3 =GetProviderAnkr(1)
# begin = time.time()
# lp = get_lp_pool_of_contract(w3,1,'0xB161D34E6E1E46170d156C8D9809EEaD4e8bc4Fd')
# print(get_initial_lp_pool_reserves(w3,1,lp,13405216,13405416))
# end = time.time()
# print(end-begin) | /rupineWeb3Utils-0.0.26.tar.gz/rupineWeb3Utils-0.0.26/RupineWeb3Utils/web3_utils/web3_checker.py | 0.441191 | 0.150746 | web3_checker.py | pypi |
from web3 import Web3
from eth_abi import encode_single, encode_abi
from eth_account import Account
import secrets
import requests
import json
import math
import os
from RupineHeroku.rupine_db import herokuCredentials, herokuAbi
# Default HTTP headers sent with every Etherscan/BscScan request below;
# some endpoints reject requests that lack a User-Agent.
headers = {
    'User-Agent': 'NMask User Agent 1.0',
    'From': 'nmask@nmask.com' # This is another valid field
}
def create_address():
    '''
    web3_generic method: create public key and private key for ETH and BNB.

    Returns
    --------
    list
        first element ist private key, second element ist public key
    '''
    # 32 random bytes as hex, prefixed with "0x", form the private key;
    # the public address is derived from it.
    private_key = "0x" + secrets.token_hex(32)
    public_key = Account.from_key(private_key).address
    return [private_key, public_key]
def create_address_from_private_key(private_key: str):
    '''
    web3_generic method: creates public key from private key.

    Parameters
    --------
    private_key : str
        string of private key. No need of leading "0x"

    Returns:
    --------
    list
        first element ist private key, second element ist public key;
        [None, None] when the key is empty or cannot be parsed
    '''
    if not private_key:
        return [None, None]
    if not private_key.startswith('0x'):
        private_key = "0x" + private_key
    try:
        return [private_key, Account.from_key(private_key).address]
    except Exception as e:
        print('Private Key Corrupt: Cannot create public key')
        print(e)
        return [None, None]
def get_eth_or_bsc_scan_api_url(chain_id: int):
    '''
    web3_generic method: get the API endpoint URLs for Rinkeby, Ethereum Mainnet, BSC Testnet and BSC Mainnet

    Parameters
    --------
    chain_id : int
        official chain id (e.g. 1 for Ethereum, 56 for Binance)

    Returns:
    --------
    str
        begin of the Etherscan or Bscscan API endpoint, or None for
        unsupported chain ids
    '''
    endpoints = {
        1: 'https://api.etherscan.io/api',
        3: 'https://api-ropsten.etherscan.io/api',
        4: 'https://api-rinkeby.etherscan.io/api',
        56: 'https://api.bscscan.com/api',
        97: 'https://api-testnet.bscscan.com/api',
    }
    return endpoints.get(chain_id)
def get_contract_abi_by_eth_or_bsc_scan(contract:str,chain_id:int,api_key:str):
    '''
    web3_generic method: get contract abi

    Parameters
    --------
    contract : str
        contract address
    chain_id : int
        official chain id (e.g. 1 for Ethereum, 56 for Binance)
    api_key : str
        API Key of the Etherscan or Bscscan API endpoint

    Returns:
    --------
    str
        (Default: None) string of contract ABI; None when the chain is
        unsupported, inputs are invalid, or the API reports an error
    '''
    url = get_eth_or_bsc_scan_api_url(chain_id)
    if url != None and len(api_key) != 0 and Web3.isAddress(contract):
        api_url = ''.join([url,'?module=contract&action=getabi&address={0}&tag=latest&apikey={1}'.format(contract,api_key)])
        response = requests.get(api_url,headers=headers)
        parsed = json.loads(response.text)
        # The *scan APIs signal failure with message == 'NOTOK' and put the
        # error description in 'result'.
        if parsed['message'] == 'NOTOK':
            print('{0}: {1}'.format(contract,parsed['result']))
            return None
        abi = parsed['result']
        return abi
    return None
def call_contract_function(web3_instance:Web3,contract_address:str,abi:str,contract_function:str,args:list=None):
    '''
    web3_generic method: call a contract function and get return

    Parameters
    --------
    web3_instance : Web3
        initialised Web3 Instance, e.g. with HTTPProvider
    contract_address : str
        contract address
    abi : str
        contract ABI
    contract_function : str
        function name
    args : list
        (Default: None) positional arguments passed to the function

    Returns:
    --------
    any
        (Default: None) function return; None for an invalid address
    '''
    if Web3.isAddress(contract_address):
        my_contract = web3_instance.eth.contract(address=Web3.toChecksumAddress(contract_address), abi=abi)
        # Rebinds the name from the function's *name* to its callable proxy.
        contract_function = my_contract.functions[contract_function]
        if args == None:
            result = contract_function().call()
        else:
            result = contract_function(*args).call()
        return result
    return None
def get_address_balance_by_eth_or_bsc_scan(address:str,chain_id:int,api_key:str,contract_address:str=None):
    '''
    web3_generic method: get ETH/BNB or token balance for address

    Parameters
    --------
    address : str
        address
    chain_id : int
        official chain id (e.g. 1 for Ethereum, 56 for Binance)
    api_key : str
        API Key of the Etherscan or Bscscan API endpoint
    contract_address : str
        (Default: None) contract address of a token

    Returns:
    --------
    int
        (Default: None) ETH/BNB (contract_address=None) or Token (contract_address has value) balance for address.
        The value is the raw integer returned by the API — presumably in the
        chain's smallest unit (wei); confirm before converting.
    '''
    url = get_eth_or_bsc_scan_api_url(chain_id)
    if url != None and len(api_key) != 0 and Web3.isAddress(address):
        # Native-coin balance vs. ERC-20 token balance endpoints.
        if contract_address == None:
            api_url = ''.join([url,'?module=account&action=balance&address={0}&tag=latest&apikey={1}'.format(address,api_key)])
        elif Web3.isAddress(contract_address):
            api_url =''.join([url,'?module=account&action=tokenbalance&address={0}&contractaddress={1}&tag=latest&apikey={2}'.format(address,contract_address,api_key)])
        else:
            return None
        try:
            response = requests.get(api_url,headers=headers)
            if response.text == 'The service is unavailable.':
                return None
            return int(json.loads(response.text)['result'])
        except Exception as e:
            print(e)
            return None
    else:
        return None
def get_addresses_balance_by_eth_or_bsc_scan(addresses:list, chain_id:int, api_key:str, token_address:str=None):
    '''
    web3_generic method: get ETH/BNB or token balance for addresslist.

    Parameters
    --------
    addresses : list
        list of addresses
    chain_id : int
        official chain id (e.g. 1 for Ethereum, 56 for Binance)
    api_key : str
        API Key of the Etherscan or Bscscan API endpoint
    token_address : str
        (Default: None) contract address of a token

    Returns:
    --------
    dict
        (Default: None) ETH/BNB (token_address=None) or Token
        (token_address has value) balance per address.
    '''
    url = get_eth_or_bsc_scan_api_url(chain_id)
    if url is None or len(api_key) == 0 or len(addresses) == 0:
        return None
    for address in addresses:
        if not Web3.isAddress(address):
            return None
    address_with_balance = {}
    # The balancemulti endpoint accepts at most 20 addresses per request.
    for i in range(0, math.ceil(len(addresses) / 20)):
        addresses_slice = addresses[int(i * 20):int((i + 1) * 20)]
        try:
            if token_address is None:
                api_url = ''.join([url, '?module=account&action=balancemulti&address={0}&tag=latest&apikey={1}'.format(','.join(addresses_slice), api_key)])
                response = requests.get(api_url, headers=headers)
                if response.text == 'The service is unavailable.':
                    return None
                for r in json.loads(response.text)['result']:
                    address_with_balance.update({r['account']: int(r['balance'])})
            else:
                # No multi endpoint for token balances: query one by one.
                for address in addresses_slice:
                    address_with_balance.update({address: get_address_balance_by_eth_or_bsc_scan(address,
                                                                                                chain_id,
                                                                                                api_key,
                                                                                                token_address
                                                                                                )})
        except Exception:
            return None
    # BUG FIX: the return used to sit inside the loop, so only the first
    # chunk of 20 addresses was ever queried.
    return address_with_balance
def get_usd_price_by_eth_or_bsc_scan(chain_id:int, api_key:str):
    '''
    web3_generic method: get current ETH/BNB USD price

    Parameters
    --------
    chain_id : int
        official chain id (e.g. 1 for Ethereum, 56 for Binance)
    api_key : str
        API Key of the Etherscan or Bscscan API endpoint

    Returns:
    --------
    dict
        (Default: None) the API's price payload, or None on error
    '''
    url = get_eth_or_bsc_scan_api_url(chain_id)
    if url is None:
        # BUG FIX: unsupported chain ids used to crash on ''.join([None, ...]).
        return None
    # BSC chains expose bnbprice, all others ethprice.
    if chain_id in [56, 97]:
        api_url = ''.join([url, '?module=stats&action=bnbprice&apikey={0}'.format(api_key)])
    else:
        api_url = ''.join([url, '?module=stats&action=ethprice&apikey={0}'.format(api_key)])
    try:
        response = requests.get(api_url, headers=headers)
        if response.text == 'The service is unavailable.':
            return None
        return json.loads(response.text)['result']
    except Exception as e:
        print(e)
        return None
def build_contract_transaction(web3_instance:Web3,contract_address:str,abi:str,tx:dict,function:str,function_args:list):
    """Build (but do not send) a transaction dict for a contract function
    call, merging the supplied ``tx`` fields (gas, value, from, ...).

    Returns None when *contract_address* is not a valid address.
    """
    if Web3.isAddress(contract_address):
        my_contract = web3_instance.eth.contract(address=contract_address, abi=abi)
        contract_function = my_contract.functions[function]
        return contract_function(*function_args).buildTransaction(tx)
    return None
def send_transaction(web3_instance:Web3, chain_id:int, tx:dict, private_key:str, nonce_with_offset:int=None):
    '''
    web3_generic method: send simple transaction

    Parameters
    --------
    web3_instance : Web3
        initialised Web3 Instance, e.g. with HTTPProvider
    chain_id : int
        official chain id (e.g. 1 for Ethereum, 56 for Binance)
    tx : dict
        dictionary with transaction keys (e.g. value,from,to,etc.) and respective values.
    private_key : str
        private key of send address, to sign transaction
    nonce_with_offset : int
        (Default: None) For multiple transactions the nonce has to be supplied
        explicitly (base nonce plus offset); otherwise the current
        transaction count of the sender is used.

    Returns:
    --------
    str
        (Default: None) transaction hash when successful
    '''
    if nonce_with_offset is not None:
        tx['nonce'] = int(nonce_with_offset)
    else:
        # Derive the sender from the private key and use its tx count.
        tx['nonce'] = int(web3_instance.eth.getTransactionCount(create_address_from_private_key(private_key)[1]))
    # maybe deprecated
    tx['chainId'] = chain_id
    # sign the transaction
    # (FIX: removed a leftover debug print(tx) that spammed stdout with the
    # full transaction payload on every send.)
    signed_tx = web3_instance.eth.account.sign_transaction(tx, private_key)
    # send transaction
    try:
        tx_hash = web3_instance.eth.sendRawTransaction(signed_tx.rawTransaction)
        # get transaction hash
        return web3_instance.toHex(tx_hash)
    except Exception as e:
        print(e)
        return None
def get_base_fee(web3_instance:Web3,chain_id:int):
    """Return the latest block's EIP-1559 base fee info, or None.

    BSC chains (56/97) are skipped because their blocks carry no
    ``baseFeePerGas`` field.
    """
    if chain_id == 56 or chain_id == 97:
        return None
    try:
        block = web3_instance.eth.get_block('latest')
        return {
            'timestamp': block['timestamp'],
            'number': block['number'],
            'baseFeePerGas': block['baseFeePerGas']
        }
    except Exception as e:
        print(e)
        return None
def getABI(connection, schema, contract_address:str,chain_id:int):
    """Return a contract's ABI, using the Heroku DB as a read-through cache.

    On a cache miss the ABI is fetched from Etherscan (chains 1/3/4) or
    BscScan (everything else) with credentials from the DB, stored back,
    and returned.  Returns None for invalid addresses or fetch failures.
    """
    if not Web3.isAddress(contract_address):
        return None
    #filename = '{0}_{1}.json'.format(str(chain_id),contract_address)
    result = herokuAbi.getAbi(connection, schema, contract_address, chain_id)
    # NOTE(review): ``data`` is never used — candidate for removal.
    data = []
    if result:
        return result
    else:
        # Pick the credential matching the chain family.
        if chain_id in [1,3,4]:
            api_key = herokuCredentials.getCredential(connection, schema, 'Etherscan DEV','API KEY', 1)
        else:
            api_key = herokuCredentials.getCredential(connection, schema, 'BSCScan DEV','API KEY', 56)
        abi = get_contract_abi_by_eth_or_bsc_scan(contract_address,chain_id,api_key)
        if abi != None:
            # Persist for the next lookup.
            herokuAbi.updateAbi(connection, schema, abi, Web3.toChecksumAddress(contract_address), chain_id)
            #write_to_file(os.environ.get('ABI_LOCATION'),filename,abi)
        return abi
import json
import os
import sys
import time
import datetime
from pandas.core.frame import DataFrame
from web3 import Web3
from collections import deque
def datetime_string_to_datetime(datetime_string: str):
    '''
    utils helper method: string to datetime type

    Parameters
    --------
    datetime_string : str
        string of datetime with format %Y-%m-%d %H:%M:%S.%f

    Results
    --------
    datetime
    '''
    # BUG FIX: the old time.strptime -> mktime -> fromtimestamp round trip
    # silently dropped the fractional seconds demanded by the format (and
    # was sensitive to the local timezone/DST).  Parse directly instead.
    return datetime.datetime.strptime(datetime_string, '%Y-%m-%d %H:%M:%S.%f')
def split_string(string: str, delimiter: str = ','):
    '''
    utils helper method: split string by delimiter

    Parameters
    --------
    string : str
        string to split
    delimiter : str
        delimiter character (default: comma)

    Results
    --------
    list
    '''
    parts = string.split(delimiter)
    return parts
def compare_dictionaries(d1: dict, d2: dict):
    '''
    utils helper method: compare two dictionaries

    Parameters
    --------
    d1 : dict
        first dictionary
    d2 : dict
        second dictionary

    Results
    --------
    tuple
        added (keys only in d2), removed (keys only in d1),
        modified (shared keys mapped to their (d1, d2) value pair),
        same (shared keys with equal values)
    '''
    keys1, keys2 = set(d1), set(d2)
    shared = keys1 & keys2
    removed = keys1 - keys2
    added = keys2 - keys1
    modified = {key: (d1[key], d2[key]) for key in shared if d1[key] != d2[key]}
    same = {key for key in shared if d1[key] == d2[key]}
    return added, removed, modified, same
def check_if_event_changes_window_state(key_list: list, comparison: tuple, added=False, removed=False, modified=False, same=False):
    """Return True when any key of *key_list* appears in one of the selected
    parts of a ``compare_dictionaries`` result tuple.

    The boolean flags pick which tuple slots (added, removed, modified,
    same — in that order) participate in the check.
    """
    selected = (added, removed, modified, same)
    return any(
        key in comparison[idx]
        for idx, enabled in enumerate(selected) if enabled
        for key in key_list
    )
def replace_or_create_key_in_dictionary_from_value(dictionary: dict, new_key: str, value: any):
    '''
    utils helper method: if the value already exists in the dictionary, its
    (first) key is renamed to *new_key*; otherwise *new_key* is created
    with the value.

    Parameters
    --------
    dictionary : dict
        dictionary to modify in place
    new_key : str
        name of the new key
    value : any
        value of any datatype

    Results
    --------
    dict
        modified dictionary
    '''
    matching = [key for key, val in dictionary.items() if val == value]
    if matching:
        # Rename the first key currently holding this value.
        dictionary[new_key] = dictionary.pop(matching[0])
    else:
        dictionary[new_key] = value
    return dictionary
def read_from_file(directory: str, filename: str):
    """Read and return the full contents of *filename* under *directory*
    (resolved through ``resource_path``); returns None on any I/O error.
    """
    if len(directory) != 0:
        directory = directory + '/'
    filename_path = resource_path(os.path.normpath('{0}{1}'.format(directory, filename)))
    try:
        # FIX: ``with`` guarantees the handle is closed even if read()
        # raises — the old open/read/close sequence leaked it on error.
        with open(filename_path) as file:
            return file.read()
    except Exception as e:
        print(e)
        return None
def write_to_file(directory: str, filename: str, data: str):
    """Create *filename* under *directory* and write *data* to it.

    Returns True on success, False otherwise.  Mode 'x' is kept from the
    original: the call fails (returns False) when the file already exists.
    """
    if len(directory) != 0:
        directory = directory + '/'
    filename_path = resource_path(os.path.normpath('{0}{1}'.format(directory, filename)))
    try:
        # FIX: ``with`` guarantees the handle is closed even if write()
        # raises — the old open/write/close sequence leaked it on error.
        with open(filename_path, 'x') as file:
            file.write(data)
        return True
    except Exception as e:
        print(e)
        return False
def check_ethereum_types_for_value(value,type,output='check'):
    '''
    utils helper method: validate or convert *value* according to an Ethereum
    ABI type name ('uint256', 'address' or 'address[]').
    Parameters
    --------
    value : any
        the value to validate or convert
    type : str
        ABI type name; note this parameter shadows the built-in `type`
    output : str
        'check' to validate (returns bool); any other value to convert
    Results
    --------
    bool | int | str | list | None
        validation result or converted value; returns None implicitly for
        unsupported type names
    '''
    if type == 'uint256':
        if output == 'check': return check_if_int(value)
        else: return int(value)
    elif type == 'address':
        if output == 'check': return Web3.isAddress(value)
        else: return Web3.toChecksumAddress(value)
    elif type == 'address[]':
        for idx,v in enumerate(value):
            # NOTE(review): in 'check' mode valid entries still fall through to
            # the elif below and are checksummed IN PLACE, mutating the caller's
            # list during a pure validation -- looks unintended, confirm.
            if output == 'check' and not Web3.isAddress(v):
                return False
            elif Web3.isAddress(v):
                value[idx] = Web3.toChecksumAddress(v)
        if output == 'check':
            return True
        else:
            return value
def flatten_list(l:list):
    '''
    utils helper method: flatten one level of nesting; non-list elements are
    kept as-is, list elements are expanded (deeper nesting is not flattened).
    Parameters
    --------
    l : list
        possibly nested input list
    Results
    --------
    list
        list flattened by one level
    '''
    flattened = []
    for element in l:
        if isinstance(element, list):
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
def check_if_float(potential_float):
    '''
    utils helper method: check whether *potential_float* can be converted
    to a float.
    Parameters
    --------
    potential_float : any
        candidate value
    Results
    --------
    bool
        True if float(potential_float) succeeds, False otherwise
    '''
    if potential_float is None:
        return False
    try:
        float(potential_float)
        return True
    # TypeError is caught too: float([]) etc. raised uncaught before.
    except (ValueError, TypeError):
        return False
def check_if_int(potential_int):
    '''
    utils helper method: check whether *potential_int* can be converted
    to an int.
    Parameters
    --------
    potential_int : any
        candidate value
    Results
    --------
    bool
        True if int(potential_int) succeeds, False otherwise
    '''
    if potential_int is None:
        return False
    try:
        int(potential_int)
        return True
    # TypeError is caught too: int({}) etc. raised uncaught before.
    except (ValueError, TypeError):
        return False
def check_if_has_key_or_key_is_none(d:dict,key:str):
    '''
    utils helper method: True only if *key* exists in *d* AND its value is
    not None (despite the "or key is none" in the name).
    Parameters
    --------
    d : dict
        dictionary to inspect
    key : str
        key name
    Results
    --------
    bool
    '''
    # 'is not None' instead of '!= None': identity check is correct for the
    # None singleton and safe for values overriding __eq__.
    return key in d and d[key] is not None
def resource_path(relative_path):
    """Get absolute path to a resource; works both in dev and in a PyInstaller bundle."""
    running_frozen = getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
    if running_frozen:
        # PyInstaller bundle: resources live next to the executable.
        base_path = os.path.dirname(sys.executable)
    else:
        # Dev mode: resources live two directories above this module.
        base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
    return os.path.join(base_path, relative_path)
def nvl(value,default):
    '''
    utils helper method: function like the ORACLE SQL NVL-Function
    Parameters
    --------
    value
        value to check
    default
        output if value is None
    Results
    --------
    value, or default if value is None
    '''
    # 'is None' instead of '== None': identity check is correct for the None
    # singleton and does not invoke custom __eq__ (which e.g. numpy arrays
    # override, making '== None' fail or return an array).
    if value is None:
        return default
    return value
def add_leading_zeros_to_int(i:int,length:int):
    '''
    utils helper method: left-pad the decimal representation of an integer
    with zeros.
    Parameters
    --------
    i : int
        value to pad
    length: int
        length of the output string
    Results
    --------
    str | None
        zero-padded string of exactly *length* characters, or None if str(i)
        is already longer than *length*
    '''
    text = str(i)
    if len(text) > length:
        return None
    # rjust pads on the left with '0' (a '-' sign is padded over, matching
    # the original concatenation behaviour).
    return text.rjust(length, '0')
def get_value_from_dict_or_default(dictionary,key:str,default):
    '''
    utils helper method: safe dictionary lookup.
    Parameters
    --------
    dictionary
        dictionary to get value from. If not a dictionary, default is returned
    key: str
        key to look up; if None, default is returned (even if None is a key)
    default
        default output when the key is not in the dictionary
    Results
    --------
    Value from dictionary, or *default*
    '''
    if not isinstance(dictionary,dict):
        return default
    # 'is None' instead of '== None': identity check for the None singleton.
    if key is None:
        return default
    return dictionary.get(key, default)
def rotate_list(l:list,n:int=1):
    '''
    utils helper method: rotate a list l by n positions
    Parameters
    --------
    l : list
        list to rotate
    n: int
        (Default: 1) if n is positive, rotate n times from left to right;
        if negative, rotate n times from right to left
    Results
    --------
    list
        a new, rotated list
    '''
    if not l:
        return list(l)
    # Reduce the shift modulo the length so any n (including |n| > len(l))
    # behaves like repeated single-step rotations.
    shift = n % len(l)
    if shift == 0:
        return list(l)
    return l[-shift:] + l[:-shift]
def dict_key_has_value(dictionary:dict,key:str,value):
    '''
    utils helper method: check whether *dictionary* holds *value* under *key*.
    Parameters
    --------
    dictionary : dict
        dictionary to check; may be None
    key: str
        key name
    value : any
        expected value of the key
    Results
    --------
    bool
        True only if the key exists and its value equals *value*
    '''
    # 'is None' instead of '== None': identity check for the None singleton.
    if dictionary is None:
        return False
    # Sentinel keeps "key missing" distinguishable from a stored None value.
    sentinel = object()
    return dictionary.get(key, sentinel) == value
def get_duration(now:datetime.datetime,then:datetime.datetime, interval = "default"):
    '''
    utils helper method: get the time difference between two datetimes
    Parameters
    --------
    now : datetime
        datetime of now
    then: datetime
        datetime of then
    interval : str
        (Default: "default") in default mode, you get a human-readable string with
        the time difference. For "years", "days", "hours" or "minutes" you get the
        whole number of that unit in the total difference; "seconds" returns the
        TOTAL seconds (not a remainder). Any other value raises KeyError.
    Results
    --------
    str | int
        time difference string, or number of the requested interval
    '''
    # Work on the absolute difference so the argument order does not matter.
    if then > now:
        duration = then - now # for built-in functions
    else:
        duration = now -then
    duration_in_s = duration.total_seconds()
    # Each unit helper returns divmod(seconds, unit_size): (count, remainder).
    def years():
        return divmod(duration_in_s, 31536000) # Seconds in a year=31536000.
    def days(seconds = None):
        return divmod(seconds if seconds != None else duration_in_s, 86400) # Seconds in a day = 86400
    def hours(seconds = None):
        return divmod(seconds if seconds != None else duration_in_s, 3600) # Seconds in an hour = 3600
    def minutes(seconds = None):
        return divmod(seconds if seconds != None else duration_in_s, 60) # Seconds in a minute = 60
    def seconds(seconds = None):
        if seconds != None:
            return divmod(seconds, 1)
        return duration_in_s
    def totalDuration():
        # Chain the remainders so each unit only counts what is left over
        # from the larger units; zero-valued units are omitted.
        y = years()
        d = days(y[1]) # Use remainder to calculate next variable
        h = hours(d[1])
        m = minutes(h[1])
        s = seconds(m[1])
        res = []
        if int(y[0]) > 0:
            res.append('{} years'.format(int(y[0])))
        if int(d[0]) > 0:
            res.append('{} days'.format(int(d[0])))
        if int(h[0]) > 0:
            res.append('{} hours'.format(int(h[0])))
        if int(m[0]) > 0:
            res.append('{} minutes'.format(int(m[0])))
        if int(s[0]) > 0:
            res.append('{} seconds'.format(int(s[0])))
        return ', '.join(res)
    # NOTE: the dict literal evaluates EVERY branch (including totalDuration())
    # before the [interval] lookup; an unknown interval raises KeyError.
    return {
        'years': int(years()[0]),
        'days': int(days()[0]),
        'hours': int(hours()[0]),
        'minutes': int(minutes()[0]),
        'seconds': int(seconds()),
        'default': totalDuration()
    }[interval]
def map_function_input_names_to_value(abi:str,function:str,function_args:list):
    '''
    utils helper method: attach ABI input names/types to positional arguments.
    For the entry of *abi* describing the function named *function*, each element
    of *function_args* is replaced IN PLACE by
    {input_name: {'value': original_arg, 'type': input_type}}.
    Parameters
    --------
    abi : str
        JSON-encoded contract ABI (a list of entries)
    function : str
        name of the function whose inputs are mapped
    function_args : list
        positional arguments; mutated in place and also returned
    Results
    --------
    list
        the (possibly mutated) function_args; unchanged if no entry matches
    '''
    abi = json.loads(abi)
    for a in abi:
        # Only consider ABI entries that are functions and declare inputs.
        if get_value_from_dict_or_default(a,'type',None) == 'function' and check_if_has_key_or_key_is_none(a,'inputs'):
            # NOTE(review): assumes every function entry has a 'name' key and that
            # len(function_args) >= len(inputs); overloaded functions with the
            # same name would be remapped repeatedly -- confirm with callers.
            if a['name'] == function:
                for idx,i in enumerate(a['inputs']):
                    function_args[idx] = {i['name']: {'value': function_args[idx],'type': i['type']}}
    return function_args
def get_function_from_abi(abi:str=None,function_name:str=None):
    '''
    utils helper method: return the ABI entry describing *function_name*.
    Parameters
    --------
    abi : str
        JSON-encoded contract ABI (a list of entries); may be None
    function_name : str
        name of the function to look up; may be None
    Results
    --------
    dict | None
        the first matching function entry, or None if not found / inputs are None
    '''
    if abi is None or function_name is None:
        return None
    for entry in json.loads(abi):
        # An entry qualifies only if it is a dict describing a named function.
        is_named_function = (isinstance(entry, dict)
                             and entry.get('type') == 'function'
                             and 'name' in entry)
        if is_named_function and entry['name'] == function_name:
            return entry
    return None
def not_flag(flag:str):
    '''
    utils helper method: returns the opposite flag value
    Parameters
    --------
    flag : str
        'Y' or 'N'
    Results
    --------
    'Y' or 'N'; None if the input is neither 'Y' nor 'N'
    '''
    # Dict dispatch; .get() yields None for any other input.
    return {'Y': 'N', 'N': 'Y'}.get(flag)
def add_pandas_df_column(df:DataFrame,column_name:str,function_on_row,function_args:list=None):
    '''
    utils helper method: add a column to *df* computed row-wise (mutates df in place).
    Parameters
    --------
    df : DataFrame
        dataframe to extend
    column_name : str
        name of the new column
    function_on_row
        callable invoked as function_on_row(row, *function_args) for each row
    function_args : list
        (Default: None) extra positional arguments; None means no extra args.
        The previous mutable default ([]) was shared across calls.
    '''
    args = function_args if function_args is not None else []
    df[column_name] = df.apply(lambda row: function_on_row(row, *args), axis=1)
r"""
Exact segmentation: dynamic programming
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
The method is implemented in :class:`ruptures.detection.Dynp`.
Roughly speaking, it computes the cost of all subsequences of a given signal.
The number of computed costs is of the order :math:`\mathcal{O}(Kn^2)`, where :math:`K` is the number
of change points and :math:`n` the number of samples.
This has to be multiplied by the computational cost of computing the approximation error on one
sub-sequence.
Consequently, piecewise constant models are significantly faster than linear or autoregressive
models.
Computational cost is drastically reduced when considering only a subsample of possible change
points.
When calling :meth:`ruptures.detection.Dynp.__init__`, the minimum distance between change points
can be set through the keyword ``'min_size'``; through the parameter ``'jump'``, only change
point indexes multiple of a particular value are considered.
Usage
----------------------------------------------------------------------------------------------------
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n, dim = 500, 3
n_bkps, sigma = 3, 5
signal, bkps = rpt.pw_constant(n, dim, n_bkps, noise_std=sigma)
# change point detection
model = "l1" # "l2", "rbf"
algo = rpt.Dynp(model=model, min_size=3, jump=5).fit(signal)
my_bkps = algo.predict(n_bkps=3)
# show results
rpt.show.display(signal, bkps, my_bkps, figsize=(10, 6))
plt.show()
Code explanation
----------------------------------------------------------------------------------------------------
.. autoclass:: ruptures.detection.Dynp
:members:
:special-members: __init__
"""
from functools import lru_cache
from ruptures.utils import sanity_check
from ruptures.costs import cost_factory
from ruptures.base import BaseCost, BaseEstimator
class Dynp(BaseEstimator):

    """Find optimal change points using dynamic programming.

    Given a segment model (cost function), computes the partition into
    ``n_bkps + 1`` segments for which the sum of segment costs is minimum.
    """

    def __init__(self, model="l2", custom_cost=None, min_size=2, jump=5, params=None):
        """Creates a Dynp instance.

        Args:
            model (str, optional): segment model, ["l1", "l2", "rbf"]. Not used if ``'custom_cost'`` is not None.
            custom_cost (BaseCost, optional): custom cost function. Defaults to None.
            min_size (int, optional): minimum segment length.
            jump (int, optional): subsample (one every *jump* points).
            params (dict, optional): a dictionary of parameters for the cost instance.
        """
        # Memoize the recursion so each (start, end, n_bkps) subproblem is
        # solved only once; the cache is reset by fit().
        self.seg = lru_cache(maxsize=None)(self._seg)  # dynamic programming
        if custom_cost is not None and isinstance(custom_cost, BaseCost):
            self.cost = custom_cost
        else:
            if params is None:
                self.cost = cost_factory(model=model)
            else:
                self.cost = cost_factory(model=model, **params)
        # A segment can never be shorter than what the cost function requires.
        self.min_size = max(min_size, self.cost.min_size)
        self.jump = jump
        self.n_samples = None  # set by fit()

    def _seg(self, start, end, n_bkps):
        """Recurrence to find the optimal partition of signal[start:end].

        This method is to be memoized (through ``self.seg``) and then used.

        Args:
            start (int): start of the segment (inclusive)
            end (int): end of the segment (exclusive)
            n_bkps (int): number of breakpoints

        Returns:
            dict: {(start, end): cost value, ...}
        """
        jump, min_size = self.jump, self.min_size
        if n_bkps == 0:
            # Base case: no change point, the whole slice is one segment.
            cost = self.cost.error(start, end)
            return {(start, end): cost}
        elif n_bkps > 0:
            # Let's fill the list of admissible last breakpoints
            multiple_of_jump = (k for k in range(start, end) if k % jump == 0)
            admissible_bkps = list()
            for bkp in multiple_of_jump:
                n_samples = bkp - start
                # first check if left subproblem is possible
                if sanity_check(n_samples, n_bkps, jump, min_size):
                    # second check if the right subproblem has enough points
                    if end - bkp >= min_size:
                        admissible_bkps.append(bkp)
            assert len(
                admissible_bkps) > 0, "No admissible last breakpoints found.\
 start, end: ({},{}), n_bkps: {}.".format(start, end, n_bkps)
            # Compute the subproblems: optimal left partition with one
            # breakpoint fewer, plus the cost of the last segment [bkp, end).
            sub_problems = list()
            for bkp in admissible_bkps:
                left_partition = self.seg(start, bkp, n_bkps - 1)
                right_partition = self.seg(bkp, end, 0)
                tmp_partition = dict(left_partition)
                tmp_partition[(bkp, end)] = right_partition[(bkp, end)]
                sub_problems.append(tmp_partition)
            # Find the optimal partition (minimum total cost).
            return min(sub_problems, key=lambda d: sum(d.values()))

    def fit(self, signal):
        """Create the cache associated with the signal.

        Dynamic programming is a recurrence; intermediate results are cached to speed up
        computations. This method sets up the cache.

        Args:
            signal (array): signal. Shape (n_samples, n_features) or (n_samples,).

        Returns:
            self
        """
        # clear cache: memoized results from a previous signal are invalid
        self.seg.cache_clear()
        # update some params
        self.cost.fit(signal)
        self.n_samples = signal.shape[0]
        return self

    def predict(self, n_bkps):
        """Return the optimal breakpoints.

        Must be called after the fit method. The breakpoints are associated with the signal passed
        to fit().

        Args:
            n_bkps (int): number of breakpoints.

        Returns:
            list: sorted list of breakpoints (last index of each regime)
        """
        partition = self.seg(0, self.n_samples, n_bkps)
        bkps = sorted(e for s, e in partition.keys())
        return bkps

    def fit_predict(self, signal, n_bkps):
        """Fit to the signal and return the optimal breakpoints.

        Helper method to call fit and predict once.

        Args:
            signal (array): signal. Shape (n_samples, n_features) or (n_samples,).
            n_bkps (int): number of breakpoints.

        Returns:
            list: sorted list of breakpoints
        """
        self.fit(signal)
        return self.predict(n_bkps)
import numpy as np
from numpy.linalg import lstsq
from ruptures.utils import pairwise
class GreedyLinear:

    """Greedy change point detection for piecewise linear processes.

    At each iteration, the candidate breakpoint that most reduces the residual
    sum of squares of per-segment linear regressions is added, until the
    chosen stopping rule (n_bkps, pen or epsilon) is satisfied.
    """

    def __init__(self, jump=10):
        """
        Args:
            jump (int, optional): only consider change points multiple of *jump*. Defaults to 10.
        """
        self.jump = jump
        # The following attributes are set by fit().
        self.n_samples = None
        self.signal = None
        self.covariates = None
        self.dim = None

    def seg(self, n_bkps=None, pen=None, epsilon=None):
        """Computes the greedy segmentation.

        The stopping rule depends on the parameter passed to the function.

        Args:
            n_bkps (int): number of breakpoints to find before stopping.
            pen (float): penalty value (>0).
            epsilon (float): reconstruction budget (>0).

        Returns:
            list: list of breakpoint indexes
        """
        stop = False
        bkps = [self.n_samples]
        # Candidate breakpoints: multiples of jump, away from both borders.
        inds = np.arange(self.jump, self.n_samples - self.jump, self.jump)
        residual = self.signal
        res_norm = residual.var() * self.n_samples
        while not stop:
            # greedy search: try each candidate as the next breakpoint
            res_list = list()
            for ind in inds:  # greedy search
                res_tmp = 0
                y_left, y_right = residual[:ind], residual[ind:]
                x_left, x_right = self.covariates[:ind], self.covariates[ind:]
                for x, y in zip((x_left, x_right), (y_left, y_right)):
                    # linear fit
                    # NOTE(review): lstsq is called without rcond; residuals
                    # can be an empty array for rank-deficient systems --
                    # confirm the covariates are full rank.
                    _, res_sub, _, _ = lstsq(x, y)
                    # error on sub-signal
                    res_tmp += res_sub
                res_list.append(res_tmp)
            # find best index (smallest total residual)
            _, bkp_opt = min(zip(res_list, inds))
            # orthogonal projection: refit one linear model per segment and
            # recompute the residual of the whole signal
            proj = np.zeros(self.signal.shape)
            for start, end in pairwise(sorted([0, bkp_opt] + bkps)):
                y = self.signal[start:end]
                x = self.covariates[start:end]
                coef, _, _, _ = lstsq(x, y)
                proj[start:end] = x.dot(coef).reshape(-1, 1)
            residual = self.signal - proj
            # stopping criterion: continue only if the rule says so
            stop = True
            if n_bkps is not None:
                # stop when the requested number of breakpoints is reached
                if len(bkps) - 1 < n_bkps:
                    stop = False
            elif pen is not None:
                # continue while the gain in residual norm exceeds the penalty
                if res_norm - residual.var() * self.n_samples > pen:
                    stop = False
            elif epsilon is not None:
                # continue while the residual norm is above the budget
                if residual.var() * self.n_samples > epsilon:
                    stop = False
            # update
            if not stop:
                res_norm = residual.var() * self.n_samples
                bkps.append(bkp_opt)
        bkps.sort()
        return bkps

    def fit(self, signal, covariates):
        """Compute params to segment signal.

        Args:
            signal (array): univariate signal to segment. Shape (n_samples, 1) or (n_samples,).
            covariates (array): covariates. Shape (n_samples, n_features).

        Returns:
            self
        """
        # update some params; the signal is centred before segmentation
        if signal.ndim == 1:
            self.signal = signal.reshape(-1, 1) - signal.mean()
        else:
            self.signal = signal - signal.mean()
        self.n_samples, dim = self.signal.shape
        assert dim == 1, "Signal must be 1D."
        self.covariates = covariates
        _, self.dim = self.covariates.shape
        assert covariates.ndim == 2, "Reshape the covariates."
        assert covariates.shape[0] == self.n_samples, "Check size."
        return self

    def predict(self, n_bkps=None, pen=None, epsilon=None):
        """Return the optimal breakpoints.

        Must be called after the fit method. The breakpoints are associated with the signal passed
        to fit().
        The stopping rule depends on the parameter passed to the function.

        Args:
            n_bkps (int): number of breakpoints to find before stopping.
            pen (float): penalty value (>0).
            epsilon (float): reconstruction budget (>0).

        Returns:
            list: sorted list of breakpoints
        """
        msg = "Give a parameter."
        assert any(param is not None for param in (n_bkps, pen, epsilon)), msg
        bkps = self.seg(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
        return bkps

    def fit_predict(self, signal, covariates, n_bkps=None, pen=None, epsilon=None):
        """Helper method to call fit and predict once."""
        self.fit(signal, covariates)
        return self.predict(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.linalg import lstsq
from ruptures.utils import pairwise
class GreedyAR:

    """Greedy change point detection for piecewise autoregressive processes.

    The signal is regressed on its own lagged values (order *order*); candidate
    breakpoints are added greedily until the chosen stopping rule is met.
    """

    def __init__(self, order=2, jump=10):
        """
        Args:
            order (int, optional): order of the autoregressive process. Defaults to 2.
            jump (int, optional): only consider change points multiple of *jump*. Defaults to 10.
        """
        self.order = max(1, order)
        # Keep jump large enough that each candidate leaves room for the
        # regression on both sides.
        self.jump = max(jump, 2 * (order + 1))
        # The following attributes are set by fit().
        self.n_samples = None
        self.signal = None
        self.covariates = None
        self.dim = None

    def seg(self, n_bkps=None, pen=None, epsilon=None):
        """Computes the greedy segmentation.

        The stopping rule depends on the parameter passed to the function.

        Args:
            n_bkps (int): number of breakpoints to find before stopping.
            pen (float): penalty value (>0).
            epsilon (float): reconstruction budget (>0).

        Returns:
            list: list of breakpoint indexes (relative to the truncated signal)
        """
        stop = False
        # The first *order* samples have no full lag vector and are dropped.
        n_samples = self.n_samples - self.order
        bkps = [n_samples]
        inds = np.arange(start=self.jump - self.order,
                         stop=n_samples - self.jump,
                         step=self.jump)
        signal = self.signal[-n_samples:]
        residual = signal
        res_norm = residual.var() * n_samples
        while not stop:
            # greedy search: try each candidate as the next breakpoint
            res_list = list()
            for ind in inds:  # greedy search
                res_tmp = 0
                y_left, y_right = residual[:ind], residual[ind:]
                x_left, x_right = self.covariates[:ind], self.covariates[ind:]
                for x, y in zip((x_left, x_right), (y_left, y_right)):
                    # linear fit
                    # NOTE(review): lstsq is called without rcond; residuals
                    # can be an empty array for rank-deficient systems.
                    _, res_sub, _, _ = lstsq(x, y)
                    # error on sub-signal
                    res_tmp += res_sub
                res_list.append(res_tmp)
            # find best index (smallest total residual)
            _, bkp_opt = min(zip(res_list, inds))
            # orthogonal projection: refit one AR model per segment and
            # recompute the residual of the whole (truncated) signal
            proj = np.zeros(signal.shape)
            for start, end in pairwise(sorted([0, bkp_opt] + bkps)):
                y = signal[start:end]
                x = self.covariates[start:end]
                coef, _, _, _ = lstsq(x, y)
                proj[start:end] = x.dot(coef).reshape(-1, 1)
            residual = signal - proj
            # stopping criterion: continue only if the rule says so
            stop = True
            if n_bkps is not None:
                # stop when the requested number of breakpoints is reached
                if len(bkps) - 1 < n_bkps:
                    stop = False
            elif pen is not None:
                # continue while the gain in residual norm exceeds the penalty
                if res_norm - residual.var() * n_samples > pen:
                    stop = False
            elif epsilon is not None:
                # continue while the residual norm is above the budget
                if residual.var() * n_samples > epsilon:
                    stop = False
            # update
            if not stop:
                res_norm = residual.var() * n_samples
                bkps.append(bkp_opt)
        bkps.sort()
        return bkps

    def fit(self, signal):
        """Compute params to segment signal.

        Args:
            signal (array): univariate signal to segment. Shape (n_samples, 1) or (n_samples,).

        Returns:
            self
        """
        # update some params; the signal is centred before segmentation
        if signal.ndim == 1:
            self.signal = signal.reshape(-1, 1) - signal.mean()
        else:
            self.signal = signal - signal.mean()
        self.n_samples, dim = self.signal.shape
        assert dim == 1, "Signal must be 1D."
        # covariates are lagged sub-sequences: row k holds *order* consecutive
        # samples starting at k, plus an intercept column.
        # NOTE(review): the (itemsize, itemsize) strides assume the (n, 1)
        # signal array is C-contiguous -- confirm upstream.
        shape = (self.n_samples - self.order, self.order)
        strides = (self.signal.itemsize, self.signal.itemsize)
        covariates = np.lib.stride_tricks.as_strided(self.signal,
                                                     shape=shape, strides=strides)
        intercept = np.ones(self.n_samples - self.order)
        covariates = np.column_stack((covariates, intercept))
        self.covariates = covariates
        return self

    def predict(self, n_bkps=None, pen=None, epsilon=None):
        """Return the optimal breakpoints.

        Must be called after the fit method. The breakpoints are associated with the signal passed
        to fit().
        The stopping rule depends on the parameter passed to the function.

        Args:
            n_bkps (int): number of breakpoints to find before stopping.
            pen (float): penalty value (>0).
            epsilon (float): reconstruction budget (>0).

        Returns:
            list: sorted list of breakpoints
        """
        msg = "Give a parameter."
        assert any(param is not None for param in (n_bkps, pen, epsilon)), msg
        bkps = self.seg(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
        # Shift back by *order*: seg() works on the lag-truncated signal.
        return [b + self.order for b in bkps]

    def fit_predict(self, signal, n_bkps=None, pen=None, epsilon=None):
        """Helper method to call fit and predict once."""
        self.fit(signal)
        return self.predict(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
r"""
Exact segmentation: Pelt
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
The method is implemented in :class:`ruptures.detection.Pelt`.
Because the enumeration of all possible partitions is impossible, the algorithm relies on a pruning
rule. Many indexes are discarded, greatly reducing the computational cost while retaining the
ability to find the optimal segmentation.
The implementation follows :cite:`b-Killick2012a`. In addition, under certain conditions on the change
point repartition, the computational complexity is linear on average.
When calling :meth:`ruptures.detection.Pelt.__init__`, the minimum distance between change points
can be set through the keyword ``'min_size'``; through the parameter ``'jump'``, only change
point indexes multiple of a particular value are considered.
Usage
----------------------------------------------------------------------------------------------------
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n, dim = 500, 3
n_bkps, sigma = 3, 1
    signal, bkps = rpt.pw_constant(n, dim, n_bkps, noise_std=sigma)
# change point detection
model = "l1" # "l2", "rbf"
algo = rpt.Pelt(model=model, min_size=3, jump=5).fit(signal)
my_bkps = algo.predict(pen=3)
# show results
fig, (ax,) = rpt.display(signal, bkps, my_bkps, figsize=(10, 6))
plt.show()
Code explanation
----------------------------------------------------------------------------------------------------
.. autoclass:: ruptures.detection.Pelt
:members:
:special-members: __init__
.. rubric:: References
.. bibliography:: ../biblio.bib
:style: alpha
:cited:
:labelprefix: B
:keyprefix: b-
"""
from math import floor
from ruptures.costs import cost_factory
from ruptures.base import BaseCost, BaseEstimator
class Pelt(BaseEstimator):

    """Penalized change point detection (PELT).

    For a given model and penalty level, computes the segmentation which
    minimizes the penalized sum of approximation errors.
    """

    def __init__(self, model="l2", custom_cost=None, min_size=2, jump=5, params=None):
        """Initialize a Pelt instance.

        Args:
            model (str, optional): segment model, ["l1", "l2", "rbf"]. Not used if ``'custom_cost'`` is not None.
            custom_cost (BaseCost, optional): custom cost function. Defaults to None.
            min_size (int, optional): minimum segment length.
            jump (int, optional): subsample (one every *jump* points).
            params (dict, optional): a dictionary of parameters for the cost instance.
        """
        if custom_cost is not None and isinstance(custom_cost, BaseCost):
            self.cost = custom_cost
        else:
            if params is None:
                self.cost = cost_factory(model=model)
            else:
                self.cost = cost_factory(model=model, **params)
        # A segment can never be shorter than what the cost function requires.
        self.min_size = max(min_size, self.cost.min_size)
        self.jump = jump
        self.n_samples = None  # set by fit()

    def _seg(self, pen):
        """Computes the segmentation for a given penalty using PELT.

        Args:
            pen (float): penalty value (>0)

        Returns:
            dict: partition dict {(start, end): cost value,...}
        """
        # initialization
        # partitions[t] contains the optimal partition of signal[0:t],
        # stored as {(start, end): penalized cost, ...}
        partitions = dict()  # this dict will be recursively filled
        partitions[0] = {(0, 0): 0}
        admissible = []
        # Recursion over candidate segment ends (multiples of jump).
        ind = [
            k for k in range(0, self.n_samples, self.jump) if k >= self.min_size]
        ind += [self.n_samples]
        for bkp in ind:
            # adding a point to the admissible set from the previous loop.
            new_adm_pt = floor((bkp - self.min_size) / self.jump)
            new_adm_pt *= self.jump
            admissible.append(new_adm_pt)
            subproblems = list()
            for t in admissible:
                # left partition
                try:
                    tmp_partition = partitions[t].copy()
                except KeyError:  # no partition of 0:t exists
                    continue
                # we update with the right partition (last segment [t, bkp))
                tmp_partition.update({(t, bkp): self.cost.error(t, bkp) + pen})
                subproblems.append(tmp_partition)
            # finding the optimal partition ending at bkp
            partitions[bkp] = min(
                subproblems, key=lambda d: sum(d.values()))
            # trimming the admissible set: PELT pruning rule -- a candidate t
            # is kept only if its partial cost could still beat the current
            # optimum by more than the penalty.
            admissible = [t for t, partition in
                          zip(admissible, subproblems) if
                          sum(partition.values()) <=
                          sum(partitions[bkp].values()) + pen]
        best_partition = partitions[self.n_samples]
        # Remove the (0, 0) bootstrap entry before returning.
        del best_partition[(0, 0)]
        return best_partition

    def fit(self, signal):
        """Set params.

        Args:
            signal (array): signal to segment. Shape (n_samples, n_features) or (n_samples,).

        Returns:
            self
        """
        # update params
        self.cost.fit(signal)
        if signal.ndim == 1:
            n_samples, = signal.shape
        else:
            n_samples, _ = signal.shape
        self.n_samples = n_samples
        return self

    def predict(self, pen):
        """Return the optimal breakpoints.

        Must be called after the fit method. The breakpoints are associated with the signal passed
        to fit().

        Args:
            pen (float): penalty value (>0)

        Returns:
            list: sorted list of breakpoints (last index of each regime)
        """
        partition = self._seg(pen)
        bkps = sorted(e for s, e in partition.keys())
        return bkps

    def fit_predict(self, signal, pen):
        """Fit to the signal and return the optimal breakpoints.

        Helper method to call fit and predict once.

        Args:
            signal (array): signal. Shape (n_samples, n_features) or (n_samples,).
            pen (float): penalty value (>0)

        Returns:
            list: sorted list of breakpoints
        """
        self.fit(signal)
        return self.predict(pen)
r"""
.. _sec-pw-linear:
Shift in linear model
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
This function simulates a piecewise linear model (see :ref:`sec-linear`).
The covariates standard Gaussian random variables.
The response variable is a (piecewise) linear combination of the covariates.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create a signal.
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n, dim = 500, 3 # number of samples, dimension of the covariates
n_bkps, sigma = 3, 5 # number of change points, noise standart deviation
signal, bkps = rpt.pw_linear(n, dim, n_bkps, noise_std=sigma)
rpt.display(signal, bkps)
Code explanation
----------------------------------------------------------------------------------------------------
.. autofunction:: ruptures.datasets.pw_linear.pw_linear
"""
import numpy as np
from numpy.random import normal
from . import pw_constant
from ruptures.utils import draw_bkps
def pw_linear(n_samples=200, n_features=1, n_bkps=3, noise_std=None):
    """
    Return a piecewise linear signal and the associated changepoints.

    The covariates are standard Gaussian; the response is a piecewise linear
    combination of them (coefficients are piecewise constant), optionally
    corrupted with Gaussian noise.

    Args:
        n_samples (int, optional): signal length
        n_features (int, optional): number of covariates
        n_bkps (int, optional): number of change points
        noise_std (float, optional): noise std. If None, no noise is added

    Returns:
        tuple: signal of shape (n_samples, n_features+1), list of breakpoints
    """
    covariates = normal(size=(n_samples, n_features))
    # Piecewise constant regression coefficients, one set per regime.
    coefficients, bkps = pw_constant(n_samples=n_samples,
                                     n_bkps=n_bkps,
                                     n_features=n_features,
                                     noise_std=None)
    response = (coefficients * covariates).sum(axis=1)
    if noise_std is not None:
        response = response + normal(scale=noise_std, size=response.shape)
    # First column is the response, remaining columns are the covariates.
    signal = np.column_stack((response, covariates))
    return signal, bkps
r"""
.. _sec-hausdorff:
Hausdorff metric
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
The Hausdorff metric measures the worst prediction error.
Assume a set of change point indexes :math:`t_1,t_2,\dots` and their estimates :math:`\hat{t}_1, \hat{t}_2,\dots`.
The Hausdorff metric is then equal to
.. math:: \text{Hausdorff}(\{t_k\}_k, \{\hat{t}_k\}_k) := \max \{ \max_k \min_l |t_k - \hat{t}_l| \, , \max_k \min_l |\hat{t}_k - t_l|\}.
.. figure:: /images/hausdorff.png
:scale: 50 %
:alt: hausdorff metric
Schematic example: true segmentation in gray, estimated segmentation in dashed lines. Here, Hausdorff is equal to :math:`\max(\Delta t_1, \Delta t_2, \Delta t_3)`.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create two segmentations to compare.
.. code-block:: python
from ruptures.metrics import hausdorff
bkps1, bkps2 = [100, 200, 500], [105, 115, 350, 400, 500]
print(hausdorff(bkps1, bkps2))
Code explanation
----------------------------------------------------------------------------------------------------
.. autofunction:: ruptures.metrics.hausdorff.hausdorff
"""
import numpy as np
from scipy.spatial.distance import cdist
from ruptures.metrics.sanity_check import sanity_check
def hausdorff(bkps1, bkps2):
    """Compute the Hausdorff distance between two changepoint sets.

    Args:
        bkps1 (list): list of the last index of each regime.
        bkps2 (list): list of the last index of each regime.

    Returns:
        float: Hausdorff distance.
    """
    sanity_check(bkps1, bkps2)
    # Drop the trivial final breakpoint (signal length) before comparing.
    arr1 = np.array(bkps1[:-1]).reshape(-1, 1)
    arr2 = np.array(bkps2[:-1]).reshape(-1, 1)
    dist_matrix = cdist(arr1, arr2)
    # Directed distances in both directions; Hausdorff is their maximum.
    directed_1_to_2 = dist_matrix.min(axis=0).max()
    directed_2_to_1 = dist_matrix.min(axis=1).max()
    return max(directed_1_to_2, directed_2_to_1)
r"""
.. _sec-randindex:
Rand index
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
The Rand index measures the similarity between two segmentations.
Formally, for a signal :math:`\{y_t\}_t` and a segmentation :math:`\mathcal{S}`, denote by :math:`A` the associated membership matrix:
.. math::
\mathcal{A}_{ij} &= 1 \text{ if both samples } y_i \text{ and } y_j \text{ are in the same segment according to } \mathcal{S} \\
&= 0 \quad\text{otherwise}
Let :math:`\hat{\mathcal{S}}` be the estimated segmentation and :math:`\hat{A}`, the associated membership matrix.
Then the Rand index is equal to
.. math::
\frac{\sum_{i<j} \mathbb{1}(A_{ij} = \hat{A}_{ij})}{T(T-1)/2}
where :math:`T` is the number of samples.
It has a value between 0 and 1: 0 indicates that the two segmentations do not agree on any pair of points and 1 indicates that the two segmentations are exactly the same.
.. figure:: /images/randindex.png
:scale: 50 %
:alt: Schematic view of the RandIndex metric
Schematic example: true segmentation in gray, estimated segmentation in dashed lines and their associated membership matrices. Rand index is equal to 1 minus the gray area.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create two segmentations to compare.
.. code-block:: python
from ruptures.metrics import randindex
bkps1, bkps2 = [100, 200, 500], [105, 115, 350, 400, 500]
print(randindex(bkps1, bkps2))
Code explanation
----------------------------------------------------------------------------------------------------
.. autofunction:: ruptures.metrics.randindex.randindex
"""
from ruptures.metrics import hamming
def randindex(bkps1, bkps2):
    """Rand index for two partitions, scaled between 0 and 1.

    Args:
        bkps1 (list): list of the last index of each regime.
        bkps2 (list): list of the last index of each regime.

    Returns:
        float: Rand index
    """
    # Rand index is the complement of the (normalized) Hamming distance.
    hamming_distance = hamming(bkps1, bkps2)
    return 1 - hamming_distance
r"""
.. _sec-precision:
Precision and recall
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
A true changepoint is declared "detected" (or positive) if there is at least one computed changepoint at less than "margin" points from it.
Formally, assume a set of change point indexes :math:`t_1,t_2,\dots` and their estimates :math:`\hat{t}_1, \hat{t}_2,\dots`
In the context of change point detection, precision and recall are defined as follows:
.. math:: \text{precision}:=|\text{TP}|/|\{\hat{t}_l\}_l| \quad \text{and}\quad\text{recall}:=|\text{TP}|/|\{t_k\}_k|
where, for a given margin :math:`M`, true positives :math:`\text{TP}` are true change points for which there is an estimated one at less than :math:`M` samples, *i.e*
.. math:: \text{TP}:= \{t_k\,|\, \exists\, \hat{t}_l\,\, \text{s.t.}\, |\hat{t}_l - t_k|<M \}.
.. figure:: /images/precision_recall.png
:scale: 50 %
:alt: Schematic view of the precision and recall
Schematic example: true segmentation in gray, estimated segmentation in dashed lines and margin in dashed areas. Here, precision is 2/3 and recall is 2/2.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create two segmentations to compare.
.. code-block:: python
from ruptures.metrics import precision_recall
bkps1, bkps2 = [100, 200, 500], [105, 115, 350, 400, 500]
p, r = precision_recall(bkps1, bkps2)
print((p, r))
The margin parameter :math:`M` can be changed through the keyword ``'margin'`` (default is 10 samples).
.. code-block:: python
p, r = precision_recall(bkps1, bkps2, margin=10)
print((p, r))
p, r = precision_recall(bkps1, bkps2, margin=20)
print((p, r))
Code explanation
----------------------------------------------------------------------------------------------------
.. autofunction:: ruptures.metrics.precisionrecall.precision_recall
"""
from itertools import groupby, product
import numpy as np
from ruptures.metrics.sanity_check import sanity_check
from ruptures.utils import unzip
def precision_recall(true_bkps, my_bkps, margin=10):
    """Calculate the precision/recall of an estimated segmentation compared with the true segmentation.

    Args:
        true_bkps (list): list of the last index of each regime (true
            partition).
        my_bkps (list): list of the last index of each regime (computed
            partition).
        margin (int, optional): allowed error (in points).

    Returns:
        tuple: (precision, recall)
    """
    sanity_check(true_bkps, my_bkps)
    assert margin > 0, "Margin of error must be positive (margin = {})".format(
        margin)
    # A single breakpoint means only the trailing sample count: nothing was
    # actually detected.
    if len(my_bkps) == 1:
        return 0, 0
    # Greedily match each true change point with an unused estimate that
    # falls within the margin.
    matched_estimates = set()
    true_pos = set()
    for true_b, my_b in product(true_bkps[:-1], my_bkps[:-1]):
        within_margin = my_b - margin < true_b < my_b + margin
        if within_margin and my_b not in matched_estimates:
            matched_estimates.add(my_b)
            true_pos.add(true_b)
    tp_ = len(true_pos)
    precision = tp_ / (len(my_bkps) - 1)
    recall = tp_ / (len(true_bkps) - 1)
    return precision, recall
r"""
.. _sec-display:
Display
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
The function :func:`display` displays a signal and the change points provided in alternating colors.
If another set of change point indexes is provided, they are displayed with dashed vertical dashed lines.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create a signal.
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n, dim = 500, 2 # number of samples, dimension
n_bkps, sigma = 3, 5 # number of change points, noise standard deviation
signal, bkps = rpt.pw_constant(n, dim, n_bkps, noise_std=sigma)
rpt.display(signal, bkps)
If we computed another set of change points, for instance ``[110, 150, 320, 500]``, we can easily compare the two segmentations.
.. code-block:: python
rpt.display(signal, bkps, [110, 150, 320, 500])
.. figure:: /images/example-display.png
:scale: 50 %
Example output of the function :func:`display`.
Code explanation
----------------------------------------------------------------------------------------------------
.. autofunction:: ruptures.show.display.display
"""
from itertools import cycle
import numpy as np
from ruptures.utils import pairwise
COLOR_CYCLE = ["#4286f4", "#f44174"]
class MatplotlibMissingError(RuntimeError):
    """Raised when matplotlib is required for display but is not installed."""
def display(signal, true_chg_pts, computed_chg_pts=None, **kwargs):
    """
    Display a signal and the change points provided in alternating colors. If
    another set of change points is provided, they are displayed with dashed
    vertical lines.

    Args:
        signal (array): signal array, shape (n_samples,) or (n_samples, n_features).
        true_chg_pts (list): list of change point indexes.
        computed_chg_pts (list, optional): list of change point indexes.

    Keyword Args:
        figsize (tuple): figure size.
        alpha (float): transparency of the colored background.
        color (str): color of the lines marking ``computed_chg_pts``.
        linewidth (int): linewidth of those lines.
        linestyle (str): linestyle of those lines.

    Returns:
        tuple: (figure, axarr) with a :class:`matplotlib.figure.Figure` object and an array of Axes objects.

    Raises:
        MatplotlibMissingError: if matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError as err:
        # Chain the original ImportError so the root cause stays visible.
        raise MatplotlibMissingError(
            'This feature requires the optional dependency matplotlib, you can install it using `pip install matplotlib`.') from err
    if not isinstance(signal, np.ndarray):
        # Try to get the underlying array from a pandas DataFrame/Series.
        signal = signal.values
    if signal.ndim == 1:
        signal = signal.reshape(-1, 1)
    n_samples, n_features = signal.shape
    # Display options, each overridable through a keyword argument.
    figsize = kwargs.get("figsize", (10, 2 * n_features))  # figure size
    alpha = kwargs.get("alpha", 0.2)  # transparency of the colored background
    color = kwargs.get("color", "k")  # color of the computed_chg_pts lines
    linewidth = kwargs.get("linewidth", 3)  # linewidth of those lines
    linestyle = kwargs.get("linestyle", "--")  # linestyle of those lines
    fig, axarr = plt.subplots(n_features, figsize=figsize, sharex=True)
    if n_features == 1:
        axarr = [axarr]
    for axe, sig in zip(axarr, signal.T):
        color_cycle = cycle(COLOR_CYCLE)
        # plot the signal dimension
        axe.plot(range(n_samples), sig)
        # color each (true) regime with alternating background colors
        bkps = [0] + sorted(true_chg_pts)
        for (start, end), col in zip(pairwise(bkps), color_cycle):
            axe.axvspan(max(0, start - 0.5),
                        end - 0.5,
                        facecolor=col, alpha=alpha)
        # vertical lines to mark the computed_chg_pts
        if computed_chg_pts is not None:
            for bkp in computed_chg_pts:
                if bkp != 0 and bkp < n_samples:
                    axe.axvline(x=bkp - 0.5,
                                color=color,
                                linewidth=linewidth,
                                linestyle=linestyle)
    fig.tight_layout()
    return fig, axarr
# rudolf-tools
My personal python tools
## Getting started
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
## Add your files
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
```
cd existing_repo
git remote add origin https://gitlab.com/rudolfluttich/rudolf-tools.git
git branch -M main
git push -uf origin main
```
## Integrate with your tools
- [ ] [Set up project integrations](https://gitlab.com/rudolfluttich/rudolf-tools/-/settings/integrations)
## Collaborate with your team
- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
## Test and Deploy
Use the built-in continuous integration in GitLab.
- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
***
# Editing this README
When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
## Suggestions for a good README
Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
## Name
Choose a self-explaining name for your project.
## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
## Usage
Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
## Support
Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.
## Contributing
State if you are open to contributions and what your requirements are for accepting them.
For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.
## License
For open source projects, say how it is licensed.
## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
| /ruru-tools-0.0.1.tar.gz/ruru-tools-0.0.1/README.md | 0.514644 | 0.822759 | README.md | pypi |
import collections
import json
import logging
import optparse
from ruruki import graphs
from flask import Flask
from flask import jsonify
from flask import request
from flask.ext.cors import CORS
from werkzeug.exceptions import NotFound
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the browser front-end
DB = graphs.Graph()  # module-level graph database; replaced via set_db()
LOGGER = logging.getLogger(__name__)
def request_wants_json():
    """
    Checks if the request is expecting a json response (headers contain Accept:
    application/json).

    :returns: An answer: this request expects a json response?
    :rtype: :class:`bool`
    """
    accepted = request.accept_mimetypes
    best = accepted.best_match(['application/json', 'text/html'])
    if best != 'application/json':
        return False
    # JSON must be preferred over HTML (handles wildcard Accept headers).
    return accepted[best] > accepted['text/html']
def _get_indexes():
    """Collect the known vertex indexes and the supported query filters."""
    supported_filters = [
        "__contains", "__icontains",
        "__startswith", "__istartswith",
        "__endswith", "__iendswith",
        "__le", "__lt",
        "__ge", "__gt",
        "__eq", "__ieq",
        "__ne", "__ine",
    ]
    # Group indexed property names by vertex label.
    indexes = collections.defaultdict(list)
    for label, prop_name in DB.vertices.get_indexes():
        indexes[label].append(prop_name)
    return {
        "indexes": indexes,
        "filters": supported_filters,
    }
def set_db(graph):
    """
    Update and set the global ``DB``.

    :param graph: Graph to set the ``DB``.
    :type graph: :class:`ruruki.interfaces.IGraph`
    """
    global DB
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    LOGGER.info("Setting up db to %s", graph)
    DB = graph
def fetch_data(vertex_id=None, levels=0, **kwargs):
    """
    Fetch data from the database.

    :param vertex_id: Vertex id if you are returning a single vertex based on
        the id. If :obj:`None`, it will return everything.
    :type vertex_id: :class:`int` or :obj:`None`
    :param levels: How many level deep to return.
    :type levels: :class:`int`
    :param kwargs: Key and values filter parameters.
    :type kwargs: :class:`str` = :class:`str`
    :returns: Returns a iterable of vertices.
    :rtype: Iterable of :class:`ruruki.interfaces.IVertex`
    """
    db = DB
    tunned_kwargs = kwargs.copy()
    # Expand "label:property" style keys into a 'label' filter plus a
    # property filter; a value of '*' keeps only the label filter.
    for key, value in kwargs.items():
        if ':' in key:
            value = tunned_kwargs.pop(key)
            tokens = key.split(':')
            tunned_kwargs['label'] = tokens[0]
            if value == '*':
                continue
            tunned_kwargs[tokens[1]] = value

    def fetch(v, seen, levels):
        # Depth-limited traversal: collect the vertex and, while levels
        # remain, recurse into both in- and out-neighbours.
        if v in seen:
            return
        seen.add(v)
        if levels > 0:
            for each in v.get_both_vertices():
                fetch(each, seen, levels-1)

    # No specific vertex requested: run a filtered scan over all vertices.
    if vertex_id is None:
        result = list(db.get_vertices(**tunned_kwargs))
        LOGGER.info(
            "Found {0} vertices based on filter parameters: "
            "{1}".format(len(result), tunned_kwargs)
        )
        return result
    seen = set()
    LOGGER.info(
        "Fetching vertex id {0!r} and {1} levels deep.".format(
            vertex_id, levels
        )
    )
    fetch(db.get_vertex(vertex_id), seen=seen, levels=levels)
    return seen
def format_data(data, exclude_labels=None, exclude_properties=None):
    """
    Format the data into a json-serializable dict of vertices and edges.

    :param data: Data that you are formatting and returning a json response
        for.
    :type data: Iterable of :class:`ruruki.interfaces.IEntity`
    :param exclude_labels: List of labels that you would like to filter out
        and exclude for the data returned.
    :type exclude_labels: :class:`list` of :class:`str` or :obj:`None`
    :param exclude_properties: List of property key and values to exclude.
    :type exclude_properties: :class:`list` of :class:`tuple`
        (:class:`str`, :class:`str`) or :obj:`None`
    :returns: Returns the formatted data.
    :rtype: :class:`dict`
    """
    # Mutable default arguments are shared between calls; use None sentinels.
    exclude_labels = exclude_labels if exclude_labels is not None else []
    exclude_properties = exclude_properties if exclude_properties is not None else []

    if not data:
        LOGGER.warning("Empty data!")
        # Return a plain dict: callers pass the result of this function
        # through jsonify() themselves, so returning a Response object here
        # (as the previous `jsonify({})` did) would break those callers.
        return {}

    def exclude_by_label(head, tail):
        # Drop the edge when either endpoint carries an excluded label.
        return (
            head.label in exclude_labels or
            tail.label in exclude_labels
        )

    def exclude_by_property(head, tail):
        vertices = [head, tail]
        for key, values in exclude_properties:
            for value in values:
                # NOTE(review): EntityContainer is not imported anywhere in
                # this module, so this branch raises NameError whenever
                # exclude_properties is non-empty -- confirm the intended
                # import (presumably from ruruki).
                matched = list(
                    EntityContainer.static_filter(
                        vertices, **{key: value}
                    )
                )
                if matched:
                    return True
        return False

    seen = set()
    formatted_data = collections.defaultdict(list)
    for each in data:
        for edge in each.get_both_edges():
            if edge in seen:
                continue
            seen.add(edge)
            head = edge.get_in_vertex()
            tail = edge.get_out_vertex()
            if exclude_by_label(head, tail):
                continue
            if exclude_by_property(head, tail):
                continue
            formatted_data["edges"].append(edge.as_dict())
            formatted_data["vertices"].append(head.as_dict())
            formatted_data["vertices"].append(tail.as_dict())
    return formatted_data
@app.route("/", methods=["GET"])
def index():
if not request_wants_json():
return app.send_static_file('search.html')
def _parse_filters(raw_filter):
lfilters = {}
for f in raw_filter.split(','):
tokens = f.split('=');
if len(tokens[1]) > 1 and any([
all([
tokens[1][0] == '"',
tokens[1][-1] == '"'
]),
all([
tokens[1][0] == "'",
tokens[1][-1] == "'"
])
]):
lfilters[tokens[0]] = tokens[1][1:-2]
else:
try:
lfilters[tokens[0]] = float(tokens[1])
except:
lfilters[tokens[0]] = tokens[1]
return lfilters
def _filter_operation(container, operation, data):
if operation == 'AND':
return container & data
elif operation == 'OR':
return container | data
else:
return data
db = DB
vertices = set()
edges = set()
if request.args and 'filter' in request.args:
for f in json.loads(request.args.get('filter')):
lfilter = _parse_filters(f[0])
data = db.get_vertices(**lfilter)
vertices = _filter_operation(
vertices, f[1], db.get_vertices(**lfilter)
)
edges = _filter_operation(
edges, f[1], db.get_edges(**lfilter)
)
formatted_vertices = [
each.as_dict()
for each in vertices
]
formatted_edges = [
each.as_dict()
for each in edges
]
return jsonify(
{
'vertices': formatted_vertices,
'edges': formatted_edges
}
)
@app.route('/indexes', methods=['GET'])
def indexes():
    """Return the known vertex indexes and supported filters as JSON."""
    if not request_wants_json():
        raise NotFound('Nothing to see here!')
    return jsonify(_get_indexes())
@app.route("/vertices", methods=["GET"])
def vertices():
"""
Return all the vertices including the edges.
:returns: Returns all the vertices as a JSON response.
:rtype: :class:`flask.Response`
"""
if not request_wants_json():
return app.send_static_file('search.html')
results = set()
args = dict(request.args)
if '__cb' in args:
del args['__cb']
exl = args.pop("exclude_label", [])
exp = []
_ = [key for key in args if key.startswith("exclude__")]
for key in _:
exp.append((key.split("__", 1)[-1], args.pop(key)))
if args:
for key in args:
for each in args[key]:
results = results | set(fetch_data(**{key: each}))
else:
results = fetch_data()
# formatted_data = format_data(results, exl, exp)
# return jsonify(formatted_data)
formatted_vertices = [
each.as_dict()
for each in results
]
return jsonify({'vertices': formatted_vertices})
@app.route("/vertices/<int:vertex_id>", methods=["GET"])
def vertex(vertex_id):
"""
Return the vertex including the edges.
:param vertex_id: Vertex id that you are requesting.
:type vertex_id: :class:`int`
:param levels: How many level deep to return.
:type levels: :class:`int`
:returns: Returns all the vertices as a JSON response.
:rtype: :class:`flask.Response`
"""
if not request_wants_json():
return app.send_static_file('graph.html')
args = dict(request.args)
exl = args.pop("exclude_label", [])
exp = []
_ = [key for key in args if key.startswith("exclude__")]
for key in _:
exp.append((key.split("__", 1)[-1], args.pop(key)))
levels = args.pop("levels", [0])
results = fetch_data(vertex_id, levels=int(levels.pop()))
formatted_data = format_data(results, exl, exp)
return jsonify(formatted_data)
def get_cmd_opts_args():
    """
    Get the command lines options and arguments.

    :returns: Returns the options and arguments.
    :rtype: :class:`tuple` (:class:`optparse.Option`, iterable of :class:`str`)
    """
    # conflict_handler="resolve" lets -h override optparse's built-in help.
    parser = optparse.OptionParser(conflict_handler="resolve")
    parser.add_option(
        "-h", "--host", dest="host", default="0.0.0.0",
        metavar="HOST", help="Host to listen on. Default: %default"
    )
    parser.add_option(
        "-p", "--port", dest="port", type="int", default=5000,
        metavar="PORT",
        help="Port number to listen web connections. Default: %default"
    )
    parser.add_option(
        "--debug", dest="debug", action="store_true",
        help="Run the web service in debug mode. This will show stacktraces."
    )
    return parser.parse_args()
def run(host, port, debug, graph=None):
    """
    Start a web server.

    :param host: Host to listen on.
    :type host: :class:`str`
    :param port: Port to listen on.
    :type port: :class:`int`
    :param debug: Enable debugging.
    :type debug: :class:`bool`
    :param graph: Bind to a graph.
    :type graph: :class:`ruruki.interfaces.IGraph`
    """
    logging.basicConfig(level=logging.INFO)
    # Lazy %-style args: only formatted when the record is actually emitted.
    logging.info("Starting server %s:%s", host, port)
    set_db(graph)
    app.run(host=host, port=port, debug=debug)
if __name__ == "__main__":
opts, args = get_cmd_opts_args()
run(opts.host, opts.port, opts.debug) | /ruruki-eye-0.1.3.tar.gz/ruruki-eye-0.1.3/ruruki_eye/server.py | 0.71413 | 0.251464 | server.py | pypi |
import hashlib
import time
from collections import namedtuple
from django.core.cache import cache
from django.shortcuts import render
Rate = namedtuple("Rate", "amount duration")
def parse(rate):
ret = None
if rate:
amount, duration = rate.split("/")
amount = int(amount)
duration_map = {"s": 1, "m": 60, "h": 3600, "d": 86400}
if duration not in duration_map:
raise ValueError("Invalid duration: %s" % duration)
duration = duration_map[duration]
ret = Rate(amount, duration)
return ret
def _cache_key(request, *, action, key=None, user=None):
    """Compose the cache key under which rate-limit usage is stored."""
    from allauth.account.adapter import get_adapter

    if key:
        # A caller-supplied key fully scopes the limit on its own.
        scope = ()
    elif user or request.user.is_authenticated:
        scope = ("user", str((user or request.user).pk))
    else:
        scope = ("ip", get_adapter().get_client_ip(request))
    parts = ["allauth", "rl", action]
    parts.extend(scope)
    if key is not None:
        digest = hashlib.sha256(key.encode("utf8")).hexdigest()
        parts.append(digest)
    return ":".join(parts)
def clear(request, *, action, key=None, user=None):
    """Forget any recorded usage for the given action/key/user."""
    cache.delete(_cache_key(request, action=action, key=key, user=user))
def consume(request, *, action, key=None, amount=None, duration=None, user=None):
    """Record one hit against the rate limit; return whether it is allowed."""
    from allauth.account import app_settings

    configured = app_settings.RATE_LIMITS.get(action)
    if configured:
        parsed = parse(configured)
        amount = amount or parsed.amount
        duration = duration or parsed.duration
    # GET requests and unconfigured limits are never throttled.
    if request.method == "GET" or not amount or not duration:
        return True
    cache_key = _cache_key(request, action=action, key=key, user=user)
    history = cache.get(cache_key, [])
    now = time.time()
    # Drop timestamps that fell outside the sliding window.
    while history and history[-1] <= now - duration:
        history.pop()
    allowed = len(history) < amount
    if allowed:
        history.insert(0, now)
        cache.set(cache_key, history, duration)
    return allowed
def consume_or_429(request, *args, **kwargs):
    """Render a 429 response when the rate limit has been exhausted."""
    from allauth.account import app_settings

    if not consume(request, *args, **kwargs):
        return render(request, "429." + app_settings.TEMPLATE_EXTENSION, status=429)
from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from ..account import app_settings as account_settings
from ..account.adapter import get_adapter as get_account_adapter
from ..account.app_settings import EmailVerificationMethod
from ..account.models import EmailAddress
from ..account.utils import user_email, user_field, user_username
from ..utils import (
deserialize_instance,
email_address_exists,
import_attribute,
serialize_instance,
valid_email_or_none,
)
from . import app_settings
class DefaultSocialAccountAdapter(object):
    """Default set of hooks used throughout the social login/signup flow.

    Point the ``SOCIALACCOUNT_ADAPTER`` setting at a subclass to customize
    individual steps without forking the views.
    """

    # User-facing messages; the ``%s`` placeholder receives the provider name.
    error_messages = {
        "email_taken": _(
            "An account already exists with this e-mail address."
            " Please sign in to that account first, then connect"
            " your %s account."
        )
    }

    def __init__(self, request=None):
        # The request is optional so the adapter can also be used outside a
        # request/response cycle.
        self.request = request

    def pre_social_login(self, request, sociallogin):
        """
        Invoked just after a user successfully authenticates via a
        social provider, but before the login is actually processed
        (and before the pre_social_login signal is emitted).

        You can use this hook to intervene, e.g. abort the login by
        raising an ImmediateHttpResponse

        Why both an adapter hook and the signal? Intervening in
        e.g. the flow from within a signal handler is bad -- multiple
        handlers may be active and are executed in undetermined order.
        """
        pass

    def authentication_error(
        self,
        request,
        provider_id,
        error=None,
        exception=None,
        extra_context=None,
    ):
        """
        Invoked when there is an error in the authentication cycle. In this
        case, pre_social_login will not be reached.

        You can use this hook to intervene, e.g. redirect to an
        educational flow by raising an ImmediateHttpResponse.
        """
        pass

    def new_user(self, request, sociallogin):
        """
        Instantiates a new User instance.
        """
        return get_account_adapter().new_user(request)

    def save_user(self, request, sociallogin, form=None):
        """
        Saves a newly signed up social login. In case of auto-signup,
        the signup form is not available.
        """
        u = sociallogin.user
        u.set_unusable_password()
        if form:
            get_account_adapter().save_user(request, u, form)
        else:
            get_account_adapter().populate_username(request, u)
        sociallogin.save(request)
        return u

    def populate_user(self, request, sociallogin, data):
        """
        Hook that can be used to further populate the user instance.

        For convenience, we populate several common fields.

        Note that the user instance being populated represents a
        suggested User instance that represents the social user that is
        in the process of being logged in.

        The User instance need not be completely valid and conflict
        free. For example, verifying whether or not the username
        already exists, is not a responsibility.
        """
        username = data.get("username")
        first_name = data.get("first_name")
        last_name = data.get("last_name")
        email = data.get("email")
        name = data.get("name")
        user = sociallogin.user
        user_username(user, username or "")
        user_email(user, valid_email_or_none(email) or "")
        # Fall back to splitting a single "name" value into first/last parts.
        name_parts = (name or "").partition(" ")
        user_field(user, "first_name", first_name or name_parts[0])
        user_field(user, "last_name", last_name or name_parts[2])
        return user

    def get_connect_redirect_url(self, request, socialaccount):
        """
        Returns the default URL to redirect to after successfully
        connecting a social account.
        """
        assert request.user.is_authenticated
        url = reverse("socialaccount_connections")
        return url

    def validate_disconnect(self, account, accounts):
        """
        Validate whether or not the socialaccount account can be
        safely disconnected.
        """
        if len(accounts) == 1:
            # No usable password would render the local account unusable
            if not account.user.has_usable_password():
                raise ValidationError(_("Your account has no password set up."))
            # No email address, no password reset
            if app_settings.EMAIL_VERIFICATION == EmailVerificationMethod.MANDATORY:
                if not EmailAddress.objects.filter(
                    user=account.user, verified=True
                ).exists():
                    raise ValidationError(
                        _("Your account has no verified e-mail address.")
                    )

    def is_auto_signup_allowed(self, request, sociallogin):
        """Decide whether signup may proceed without showing the signup form."""
        # If email is specified, check for duplicate and if so, no auto signup.
        auto_signup = app_settings.AUTO_SIGNUP
        if auto_signup:
            email = user_email(sociallogin.user)
            # Let's check if auto_signup is really possible...
            if email:
                if account_settings.UNIQUE_EMAIL:
                    if email_address_exists(email):
                        # Oops, another user already has this address.
                        # We cannot simply connect this social account
                        # to the existing user. Reason is that the
                        # email address may not be verified, meaning,
                        # the user may be a hacker that has added your
                        # email address to their account in the hope
                        # that you fall in their trap.  We cannot
                        # check on 'email_address.verified' either,
                        # because 'email_address' is not guaranteed to
                        # be verified.
                        auto_signup = False
                        # FIXME: We redirect to signup form -- user will
                        # see email address conflict only after posting
                        # whereas we detected it here already.
            elif app_settings.EMAIL_REQUIRED:
                # Nope, email is required and we don't have it yet...
                auto_signup = False
        return auto_signup

    def is_open_for_signup(self, request, sociallogin):
        """
        Checks whether or not the site is open for signups.

        Next to simply returning True/False you can also intervene the
        regular flow by raising an ImmediateHttpResponse
        """
        return get_account_adapter(request).is_open_for_signup(request)

    def get_signup_form_initial_data(self, sociallogin):
        """Initial values for the signup form, prefilled from provider data."""
        user = sociallogin.user
        initial = {
            "email": user_email(user) or "",
            "username": user_username(user) or "",
            "first_name": user_field(user, "first_name") or "",
            "last_name": user_field(user, "last_name") or "",
        }
        return initial

    def deserialize_instance(self, model, data):
        """Rebuild a model instance from data produced by serialize_instance."""
        return deserialize_instance(model, data)

    def serialize_instance(self, instance):
        """Serialize a model instance into a JSON-safe dict."""
        return serialize_instance(instance)

    def get_app(self, request, provider, config=None):
        """Return the SocialApp for ``provider``, from settings or the database."""
        # NOTE: Avoid loading models at top due to registry boot...
        from allauth.socialaccount.models import SocialApp

        config = config or app_settings.PROVIDERS.get(provider, {}).get("APP")
        if config:
            app = SocialApp(provider=provider)
            for field in ["client_id", "secret", "key", "certificate_key"]:
                setattr(app, field, config.get(field))
        else:
            app = SocialApp.objects.get_current(provider, request)
        return app
def get_adapter(request=None):
    """Instantiate the configured social account adapter class."""
    adapter_class = import_attribute(app_settings.ADAPTER)
    return adapter_class(request)
import requests
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.gitlab.provider import GitLabProvider
from allauth.socialaccount.providers.oauth2.client import OAuth2Error
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
def _check_errors(response):
# 403 error's are presented as user-facing errors
if response.status_code == 403:
msg = response.content
raise OAuth2Error("Invalid data from GitLab API: %r" % (msg))
try:
data = response.json()
except ValueError: # JSONDecodeError on py3
raise OAuth2Error("Invalid JSON from GitLab API: %r" % (response.text))
if response.status_code >= 400 or "error" in data:
# For errors, we expect the following format:
# {"error": "error_name", "error_description": "Oops!"}
# For example, if the token is not valid, we will get:
# {"message": "status_code - message"}
error = data.get("error", "") or response.status_code
desc = data.get("error_description", "") or data.get("message", "")
raise OAuth2Error("GitLab error: %s (%s)" % (error, desc))
# The expected output from the API follows this format:
# {"id": 12345, ...}
if "id" not in data:
# If the id is not present, the output is not usable (no UID)
raise OAuth2Error("Invalid data from GitLab API: %r" % (data))
return data
class GitLabOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter for GitLab (gitlab.com by default)."""

    provider_id = GitLabProvider.id
    provider_default_url = "https://gitlab.com"
    provider_api_version = "v4"

    # A self-hosted instance can be targeted via the GITLAB_URL setting.
    settings = app_settings.PROVIDERS.get(provider_id, {})
    provider_base_url = settings.get("GITLAB_URL", provider_default_url)

    access_token_url = "{0}/oauth/token".format(provider_base_url)
    authorize_url = "{0}/oauth/authorize".format(provider_base_url)
    profile_url = "{0}/api/{1}/user".format(provider_base_url, provider_api_version)

    def complete_login(self, request, app, token, response):
        # Fetch the authenticated user's profile and build a SocialLogin.
        response = requests.get(self.profile_url, params={"access_token": token.token})
        data = _check_errors(response)
        return self.get_provider().sociallogin_from_response(request, data)
# Ready-to-use view callables for the URL configuration.
oauth2_login = OAuth2LoginView.adapter_view(GitLabOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GitLabOAuth2Adapter)
import os
from django.utils.translation import get_language, to_locale
def _build_locale_table(filename_or_file):
"""
Parses the FacebookLocales.xml file and builds a dict relating every
available language ('en, 'es, 'zh', ...) with a list of available regions
for that language ('en' -> 'US', 'EN') and an (arbitrary) default region.
"""
# Require the XML parser module only if we want the default mapping
from xml.dom.minidom import parse
dom = parse(filename_or_file)
reps = dom.getElementsByTagName("representation")
locs = map(lambda r: r.childNodes[0].data, reps)
locale_map = {}
for loc in locs:
lang, _, reg = loc.partition("_")
lang_map = locale_map.setdefault(lang, {"regs": [], "default": reg})
lang_map["regs"].append(reg)
# Default region overrides (arbitrary)
locale_map["en"]["default"] = "US"
# Special case: Use es_ES for Spain and es_LA for everything else
locale_map["es"]["default"] = "LA"
locale_map["zh"]["default"] = "CN"
locale_map["fr"]["default"] = "FR"
locale_map["pt"]["default"] = "PT"
return locale_map
def get_default_locale_callable():
    """
    Wrapper so the default language->region mapping is only built (and the
    XML only parsed) when it is actually needed.
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    xml_path = os.path.join(module_dir, "data", "FacebookLocales.xml")
    fb_locales = _build_locale_table(xml_path)

    def default_locale(request):
        """
        Guess an appropriate FB locale based on the active Django locale.

        If the active locale is available, it is returned. Otherwise,
        another region of the same language is tried, and finally 'en_US'
        is used as the fallback.
        """
        language = get_language()
        if not language:
            return "en_US"
        lang, _, region = to_locale(language).partition("_")
        entry = fb_locales.get(lang)
        if entry is None:
            return "en_US"
        if region in entry["regs"]:
            return lang + "_" + region
        return lang + "_" + entry["default"]

    return default_locale
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.amazon_cognito.utils import (
convert_to_python_bool_if_value_is_json_string_bool,
)
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class AmazonCognitoAccount(ProviderAccount):
    """Presentation wrapper around a Cognito social account."""

    def to_str(self):
        fallback = super(AmazonCognitoAccount, self).to_str()
        return self.account.extra_data.get("username", fallback)

    def get_avatar_url(self):
        # Cognito's OIDC "picture" claim, when present.
        return self.account.extra_data.get("picture")

    def get_profile_url(self):
        # Cognito's OIDC "profile" claim, when present.
        return self.account.extra_data.get("profile")
class AmazonCognitoProvider(OAuth2Provider):
    """allauth provider for Amazon Cognito user pools."""

    id = "amazon_cognito"
    name = "Amazon Cognito"
    account_class = AmazonCognitoAccount

    def extract_uid(self, data):
        # The OIDC subject is the stable unique identifier.
        return str(data["sub"])

    def extract_common_fields(self, data):
        return dict(
            email=data.get("email"),
            first_name=data.get("given_name"),
            last_name=data.get("family_name"),
        )

    def get_default_scope(self):
        return ["openid", "profile", "email"]

    def extract_email_addresses(self, data):
        email = data.get("email")
        if not email:
            return []
        # Cognito serializes booleans as JSON strings; normalize first.
        verified = convert_to_python_bool_if_value_is_json_string_bool(
            data.get("email_verified", False)
        )
        return [EmailAddress(email=email, verified=verified, primary=True)]

    def extract_extra_data(self, data):
        # Claims copied verbatim from the OIDC userinfo payload.
        plain_keys = (
            "address",
            "birthdate",
            "gender",
            "locale",
            "middlename",
            "nickname",
            "phone_number",
            "picture",
            "preferred_username",
            "profile",
            "website",
            "zoneinfo",
        )
        extra = {key: data.get(key) for key in plain_keys}
        # String-encoded boolean, normalized like email_verified above.
        extra[
            "phone_number_verified"
        ] = convert_to_python_bool_if_value_is_json_string_bool(
            data.get("phone_number_verified")
        )
        return extra

    @classmethod
    def get_slug(cls):
        # IMPORTANT: Amazon Cognito does not support `_` characters
        # as part of their redirect URI.
        return super(AmazonCognitoProvider, cls).get_slug().replace("_", "-")
provider_classes = [AmazonCognitoProvider]
from allauth.account.models import EmailAddress
from allauth.socialaccount import app_settings
from allauth.socialaccount.adapter import get_adapter
class ProviderException(Exception):
    """Base exception for provider-specific failures."""

    pass
class Provider(object):
    """
    Base class for social-account providers.

    Subclasses are expected to define ``id``, ``name`` and
    ``account_class`` class attributes, and to override the various
    ``extract_*()`` hooks used by ``sociallogin_from_response()``.
    """

    # Optional URL slug; falls back to the provider id (see get_slug()).
    slug = None

    def __init__(self, request):
        self.request = request

    @classmethod
    def get_slug(cls):
        return cls.slug or cls.id

    def get_login_url(self, request, next=None, **kwargs):
        """
        Builds the URL to redirect to when initiating a login for this
        provider.
        """
        raise NotImplementedError("get_login_url() for " + self.name)

    def get_app(self, request, config=None):
        # Delegated to the adapter so projects can customize app lookup.
        adapter = get_adapter(request)
        return adapter.get_app(request, self.id, config=config)

    def media_js(self, request):
        """
        Some providers may require extra scripts (e.g. a Facebook connect)
        """
        return ""

    def wrap_account(self, social_account):
        return self.account_class(social_account)

    def get_settings(self):
        return app_settings.PROVIDERS.get(self.id, {})

    def sociallogin_from_response(self, request, response):
        """
        Instantiates and populates a `SocialLogin` model based on the data
        retrieved in `response`. The method does NOT save the model to the
        DB.

        Data for `SocialLogin` will be extracted from `response` with the
        help of the `.extract_uid()`, `.extract_extra_data()`,
        `.extract_common_fields()`, and `.extract_email_addresses()`
        methods.

        :param request: a Django `HttpRequest` object.
        :param response: object retrieved via the callback response of the
            social auth provider.
        :return: A populated instance of the `SocialLogin` model (unsaved).
        """
        # NOTE: Avoid loading models at top due to registry boot...
        from allauth.socialaccount.models import SocialAccount, SocialLogin

        adapter = get_adapter(request)
        uid = self.extract_uid(response)
        extra_data = self.extract_extra_data(response)
        common_fields = self.extract_common_fields(response)
        socialaccount = SocialAccount(extra_data=extra_data, uid=uid, provider=self.id)
        email_addresses = self.extract_email_addresses(response)
        self.cleanup_email_addresses(common_fields.get("email"), email_addresses)
        sociallogin = SocialLogin(
            account=socialaccount, email_addresses=email_addresses
        )
        # The user is created unsaved, with an unusable password; persisting
        # it is the caller's responsibility.
        user = sociallogin.user = adapter.new_user(request, sociallogin)
        user.set_unusable_password()
        adapter.populate_user(request, sociallogin, common_fields)
        return sociallogin

    def extract_uid(self, data):
        """
        Extracts the unique user ID from `data`
        """
        raise NotImplementedError(
            "The provider must implement the `extract_uid()` method"
        )

    def extract_extra_data(self, data):
        """
        Extracts fields from `data` that will be stored in
        `SocialAccount`'s `extra_data` JSONField.

        :return: any JSON-serializable Python structure.
        """
        return data

    def extract_common_fields(self, data):
        """
        Extracts fields from `data` that will be used to populate the
        `User` model in the `SOCIALACCOUNT_ADAPTER`'s `populate_user()`
        method.

        For example:

            {'first_name': 'John'}

        :return: dictionary of key-value pairs.
        """
        return {}

    def cleanup_email_addresses(self, email, addresses):
        # Move user.email over to EmailAddress
        if email and email.lower() not in [a.email.lower() for a in addresses]:
            addresses.append(EmailAddress(email=email, verified=False, primary=True))
        # Force verified emails
        settings = self.get_settings()
        verified_email = settings.get("VERIFIED_EMAIL", False)
        if verified_email:
            for address in addresses:
                address.verified = True

    def extract_email_addresses(self, data):
        """
        For example:

        [EmailAddress(email='john@example.com',
                      verified=True,
                      primary=True)]
        """
        return []

    @classmethod
    def get_package(cls):
        # Default: the package containing the provider module.
        pkg = getattr(cls, "package", None)
        if not pkg:
            pkg = cls.__module__.rpartition(".")[0]
        return pkg
class ProviderAccount(object):
    """Wraps a SocialAccount to expose provider-specific presentation."""

    def __init__(self, social_account):
        self.account = social_account

    def get_profile_url(self):
        # No profile page by default; providers override as needed.
        return None

    def get_avatar_url(self):
        return None

    def get_brand(self):
        """
        Return a dict containing an id and name identifying the brand.

        Useful when displaying logos next to accounts in templates. For
        most providers these are identical to the provider; for OpenID,
        however, the brand can be derived from the OpenID identity URL.
        """
        provider = self.account.get_provider()
        return {"id": provider.id, "name": provider.name}

    def __str__(self):
        return self.to_str()

    def to_str(self):
        """
        Overridable string representation.

        Subclasses override ``to_str`` rather than ``__str__`` — a
        convention kept from the py2-compatibility days, so overrides
        never have to worry about bytes/unicode.
        """
        return self.get_brand()["name"]
import requests
from django.conf import settings
from allauth.socialaccount.providers.oauth2.client import OAuth2Error
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import BattleNetProvider
class Region:
    # Battle.net OAuth regions. Note that SEA is mapped onto US when
    # building endpoint URLs (see BattleNetOAuth2Adapter.battlenet_region).
    APAC = "apac"
    CN = "cn"
    EU = "eu"
    KR = "kr"
    SEA = "sea"
    TW = "tw"
    US = "us"
def _check_errors(response):
try:
data = response.json()
except ValueError: # JSONDecodeError on py3
raise OAuth2Error("Invalid JSON from Battle.net API: %r" % (response.text))
if response.status_code >= 400 or "error" in data:
# For errors, we expect the following format:
# {"error": "error_name", "error_description": "Oops!"}
# For example, if the token is not valid, we will get:
# {
# "error": "invalid_token",
# "error_description": "Invalid access token: abcdef123456"
# }
# For the profile API, this may also look like the following:
# {"code": 403, "type": "Forbidden", "detail": "Account Inactive"}
error = data.get("error", "") or data.get("type", "")
desc = data.get("error_description", "") or data.get("detail", "")
raise OAuth2Error("Battle.net error: %s (%s)" % (error, desc))
# The expected output from the API follows this format:
# {"id": 12345, "battletag": "Example#12345"}
# The battletag is optional.
if "id" not in data:
# If the id is not present, the output is not usable (no UID)
raise OAuth2Error("Invalid data from Battle.net API: %r" % (data))
return data
class BattleNetOAuth2Adapter(OAuth2Adapter):
    """
    OAuth2 adapter for Battle.net
    https://dev.battle.net/docs/read/oauth

    Region is set to us by default, but can be overridden with the
    `region` GET parameter when performing a login.
    Can be any of eu, us, kr, sea, tw or cn
    """

    provider_id = BattleNetProvider.id

    valid_regions = (
        Region.APAC,
        Region.CN,
        Region.EU,
        Region.KR,
        Region.SEA,
        Region.TW,
        Region.US,
    )

    @property
    def battlenet_region(self):
        # Check by URI query parameter first.
        region = self.request.GET.get("region", "").lower()
        if region == Region.SEA:
            # South-East Asia uses the same region as US everywhere
            return Region.US
        if region in self.valid_regions:
            return region
        # Second, check the provider settings.
        region = (
            getattr(settings, "SOCIALACCOUNT_PROVIDERS", {})
            .get("battlenet", {})
            .get("REGION", "us")
        )
        if region in self.valid_regions:
            return region
        # Fall back to US when nothing valid was supplied anywhere.
        return Region.US

    @property
    def battlenet_base_url(self):
        # China uses a dedicated host; all other regions are subdomains.
        region = self.battlenet_region
        if region == Region.CN:
            return "https://www.battlenet.com.cn"
        return "https://%s.battle.net" % (region)

    @property
    def access_token_url(self):
        return self.battlenet_base_url + "/oauth/token"

    @property
    def authorize_url(self):
        return self.battlenet_base_url + "/oauth/authorize"

    @property
    def profile_url(self):
        return self.battlenet_base_url + "/oauth/userinfo"

    def complete_login(self, request, app, token, **kwargs):
        params = {"access_token": token.token}
        response = requests.get(self.profile_url, params=params)
        data = _check_errors(response)
        # Add the region to the data so that we can have it in `extra_data`.
        data["region"] = self.battlenet_region
        return self.get_provider().sociallogin_from_response(request, data)

    def get_callback_url(self, request, app):
        r = super(BattleNetOAuth2Adapter, self).get_callback_url(request, app)
        region = request.GET.get("region", "").lower()
        # Pass the region down to the callback URL if we specified it
        if region and region in self.valid_regions:
            r += "?region=%s" % (region)
        return r
oauth2_login = OAuth2LoginView.adapter_view(BattleNetOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(BattleNetOAuth2Adapter)
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class DataportenAccount(ProviderAccount):
    """Presentation wrapper around a Dataporten social account."""

    def get_avatar_url(self):
        """
        Return a valid URL to a 128x128 .png photo of the user.

        Documentation for user profile photos can be found here:
        https://docs.dataporten.no/docs/oauth-authentication/
        """
        photo_id = self.account.extra_data["profilephoto"]
        return "https://api.dataporten.no/userinfo/v1/user/media/" + photo_id

    def to_str(self):
        """
        String representation of the social account, including the name
        of the user.
        """
        fallback = super(DataportenAccount, self).to_str()
        return "%s (%s)" % (self.account.extra_data.get("name", ""), fallback)
class DataportenProvider(OAuth2Provider):
    id = "dataporten"
    name = "Dataporten"
    account_class = DataportenAccount

    def extract_uid(self, data):
        """
        Return the primary user identifier, a UUID string.
        See: https://docs.dataporten.no/docs/userid/
        """
        return data["userid"]

    def extract_extra_data(self, data):
        """
        Return the fields stored in `SocialAccount`'s `extra_data` field.

        All necessary data extraction has already been done in the
        complete_login() view, so the data is returned unchanged (which is
        also the base-class default; kept for documentation purposes).

        Typical return dict:
            {
                "userid": "76a7a061-3c55-430d-8ee0-6f82ec42501f",
                "userid_sec": ["feide:andreas@uninett.no"],
                "name": "Andreas \u00c5kre Solberg",
                "email": "andreas.solberg@uninett.no",
                "profilephoto": "p:a3019954-902f-45a3-b4ee-bca7b48ab507",
            }
        """
        return data

    def extract_common_fields(self, data):
        """
        Extract the fields consumed by
        allauth.socialaccount.adapter.populate_user(), adding a "username"
        key derived from the Feide secondary id when available, otherwise
        from the local part of the e-mail address.
        """
        # Make shallow copy to prevent possible mutability issues
        data = dict(data)
        # If a Feide username is available, use it. If not, use the
        # "username" of the email-address.
        # Robustness fixes: "userid_sec" may be absent/None (would raise
        # TypeError when iterated), a secondary id may contain extra ":"
        # characters (2-way split would raise ValueError), and "email" may
        # be absent (None.split() would raise AttributeError).
        for userid in data.get("userid_sec") or []:
            usertype, _, username = userid.partition(":")
            if usertype == "feide":
                data["username"] = username.split("@")[0]
                break
        else:
            # Only entered if break is not executed above
            email = data.get("email") or ""
            data["username"] = email.split("@")[0]
        return data
provider_classes = [DataportenProvider]
import unicodedata
from collections import OrderedDict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_str
from django.utils.http import base36_to_int, int_to_base36, urlencode
from allauth.account import app_settings, signals
from allauth.account.adapter import get_adapter
from allauth.exceptions import ImmediateHttpResponse
from allauth.utils import (
get_request_param,
get_user_model,
import_callable,
valid_email_or_none,
)
def _unicode_ci_compare(s1, s2):
"""
Perform case-insensitive comparison of two identifiers, using the
recommended algorithm from Unicode Technical Report 36, section
2.11.2(B)(2).
"""
norm_s1 = unicodedata.normalize("NFKC", s1).casefold()
norm_s2 = unicodedata.normalize("NFKC", s2).casefold()
return norm_s1 == norm_s2
def get_next_redirect_url(request, redirect_field_name="next"):
    """
    Return the post-login redirect URL explicitly passed via the request,
    or None when it is missing or fails the adapter's safety check.
    """
    candidate = get_request_param(request, redirect_field_name)
    if get_adapter(request).is_safe_url(candidate):
        return candidate
    return None
def get_login_redirect_url(request, url=None, redirect_field_name="next", signup=False):
    """
    Resolve the URL to redirect to after login, trying in order: the
    supplied ``url`` (which may be a callable, so getters depending on
    e.g. the authenticated state can be passed around), the request's
    redirect parameter, and finally the adapter's default.
    """
    ret = url() if (url and callable(url)) else url
    if ret:
        return ret
    ret = get_next_redirect_url(request, redirect_field_name=redirect_field_name)
    if ret:
        return ret
    adapter = get_adapter(request)
    if signup:
        return adapter.get_signup_redirect_url(request)
    return adapter.get_login_redirect_url(request)
# Cache for the resolved ACCOUNT_USER_DISPLAY callable (imported lazily).
_user_display_callable = None


def logout_on_password_change(request, user):
    # Since it is the default behavior of Django to invalidate all sessions on
    # password change, this function actually has to preserve the session when
    # logout isn't desired.
    if not app_settings.LOGOUT_ON_PASSWORD_CHANGE:
        update_session_auth_hash(request, user)


def default_user_display(user):
    # Prefer the user model's username field when it has one; otherwise
    # fall back to str(user).
    if app_settings.USER_MODEL_USERNAME_FIELD:
        return getattr(user, app_settings.USER_MODEL_USERNAME_FIELD)
    else:
        return force_str(user)


def user_display(user):
    """Render ``user`` for display, honoring the ACCOUNT_USER_DISPLAY setting."""
    global _user_display_callable
    if not _user_display_callable:
        f = getattr(settings, "ACCOUNT_USER_DISPLAY", default_user_display)
        _user_display_callable = import_callable(f)
    return _user_display_callable(user)
def user_field(user, field, *args):
    """
    Get (no extra args) or set (one extra arg) a user model field by name.

    No-op when the field exists neither on the model nor on the instance.
    Set values are truncated to the model field's max_length, when known.
    """
    if not field:
        return
    User = get_user_model()
    try:
        max_length = User._meta.get_field(field).max_length
    except FieldDoesNotExist:
        if not hasattr(user, field):
            return
        max_length = None
    if not args:
        # Getter
        return getattr(user, field)
    # Setter
    value = args[0]
    if value:
        # Slicing with max_length=None leaves the value untouched.
        value = value[0:max_length]
    setattr(user, field, value)
def user_username(user, *args):
    # Setter lowercases the username unless PRESERVE_USERNAME_CASING is set.
    if args and not app_settings.PRESERVE_USERNAME_CASING and args[0]:
        args = [args[0].lower()]
    return user_field(user, app_settings.USER_MODEL_USERNAME_FIELD, *args)


def user_email(user, *args):
    # Get/set the user model's e-mail field (see user_field()).
    return user_field(user, app_settings.USER_MODEL_EMAIL_FIELD, *args)
def has_verified_email(user, email=None):
    """
    Return True when ``user`` has verified the given ``email``; with no
    e-mail given, return True when any of the user's addresses is verified.
    """
    from .models import EmailAddress

    if not email:
        return EmailAddress.objects.filter(user=user, verified=True).exists()
    try:
        return EmailAddress.objects.get_for_user(user, email).verified
    except EmailAddress.DoesNotExist:
        return False
def perform_login(
    request,
    user,
    email_verification,
    redirect_url=None,
    signal_kwargs=None,
    signup=False,
    email=None,
):
    """
    Log ``user`` in via the adapter's pre_login/login/post_login hooks.

    Returns an HttpResponse when one of the hooks short-circuits (or when
    an ImmediateHttpResponse is raised), otherwise whatever post_login
    returned (possibly None).

    Keyword arguments:

    signup -- Indicates whether or not sending the
    email is essential (during signup), or if it can be skipped (e.g. in
    case email verification is optional and we are only logging in).
    """
    # Local users are stopped due to form validation checking
    # is_active, yet, adapter methods could toy with is_active in a
    # `user_signed_up` signal. Furthermore, social users should be
    # stopped anyway.
    adapter = get_adapter(request)
    try:
        hook_kwargs = dict(
            email_verification=email_verification,
            redirect_url=redirect_url,
            signal_kwargs=signal_kwargs,
            signup=signup,
            email=email,
        )
        # pre_login may short-circuit (e.g. to enforce verification).
        response = adapter.pre_login(request, user, **hook_kwargs)
        if response:
            return response
        adapter.login(request, user)
        response = adapter.post_login(request, user, **hook_kwargs)
        if response:
            return response
    except ImmediateHttpResponse as e:
        # Adapter hooks may abort the flow with a ready-made response.
        response = e.response
    return response


def complete_signup(request, user, email_verification, success_url, signal_kwargs=None):
    """Emit the user_signed_up signal, then log the fresh user in."""
    if signal_kwargs is None:
        signal_kwargs = {}
    signals.user_signed_up.send(
        sender=user.__class__, request=request, user=user, **signal_kwargs
    )
    return perform_login(
        request,
        user,
        email_verification=email_verification,
        signup=True,
        redirect_url=success_url,
        signal_kwargs=signal_kwargs,
    )
def cleanup_email_addresses(request, addresses):
    """
    Takes a list of EmailAddress instances and cleans it up, making
    sure only valid ones remain, without multiple primaries etc.

    Order is important: e.g. if multiple primary e-mail addresses
    exist, the first one encountered will be kept as primary.

    Returns a tuple ``(addresses, primary_address)`` where ``addresses``
    is the deduplicated list and ``primary_address`` is the chosen
    primary EmailAddress (or None when the list ends up empty).
    """
    from .models import EmailAddress

    adapter = get_adapter(request)
    # Let's group by `email`
    e2a = OrderedDict()  # maps lowercased email to EmailAddress
    primary_addresses = []
    verified_addresses = []
    primary_verified_addresses = []
    for address in addresses:
        # Pick up only valid ones...
        email = valid_email_or_none(address.email)
        if not email:
            continue
        # ... and non-conflicting ones...
        if (
            app_settings.UNIQUE_EMAIL
            and EmailAddress.objects.filter(email__iexact=email).exists()
        ):
            continue
        a = e2a.get(email.lower())
        if a:
            a.primary = a.primary or address.primary
            a.verified = a.verified or address.verified
        else:
            a = address
            a.verified = a.verified or adapter.is_email_verified(request, a.email)
            e2a[email.lower()] = a
        if a.primary:
            primary_addresses.append(a)
            if a.verified:
                primary_verified_addresses.append(a)
        if a.verified:
            verified_addresses.append(a)
    # Now that we got things sorted out, let's assign a primary
    if primary_verified_addresses:
        primary_address = primary_verified_addresses[0]
    elif verified_addresses:
        # Pick any verified as primary
        primary_address = verified_addresses[0]
    elif primary_addresses:
        # Okay, let's pick primary then, even if unverified
        primary_address = primary_addresses[0]
    elif e2a:
        # Pick the first address. BUGFIX: this used to read
        # ``e2a.keys()[0]``, which raises TypeError on Python 3 (dict
        # views are not indexable) and would, even if indexed, yield the
        # *key* (a plain string) rather than an EmailAddress — breaking
        # ``primary_address.email`` below.
        primary_address = next(iter(e2a.values()))
    else:
        # Empty
        primary_address = None
    # There can only be one primary
    for a in e2a.values():
        a.primary = primary_address.email.lower() == a.email.lower()
    return list(e2a.values()), primary_address
def setup_user_email(request, user, addresses):
    """
    Creates proper EmailAddress for the user that was just signed
    up. Only sets up, doesn't do any other handling such as sending
    out email confirmation mails etc.

    Returns the chosen primary EmailAddress (or None).
    """
    from .models import EmailAddress

    # A freshly signed-up user must not have pre-existing addresses.
    assert not EmailAddress.objects.filter(user=user).exists()
    priority_addresses = []
    # Is there a stashed e-mail?
    adapter = get_adapter(request)
    stashed_email = adapter.unstash_verified_email(request)
    if stashed_email:
        priority_addresses.append(
            EmailAddress(user=user, email=stashed_email, primary=True, verified=True)
        )
    email = user_email(user)
    if email:
        priority_addresses.append(
            EmailAddress(user=user, email=email, primary=True, verified=False)
        )
    # Priority addresses come first so cleanup keeps them as primary.
    addresses, primary = cleanup_email_addresses(
        request, priority_addresses + addresses
    )
    for a in addresses:
        a.user = user
        a.save()
    EmailAddress.objects.fill_cache_for_user(user, addresses)
    # Keep user.email in sync with the chosen primary address.
    if primary and email and email.lower() != primary.email.lower():
        user_email(user, primary.email)
        user.save()
    return primary


def send_email_confirmation(request, user, signup=False, email=None):
    """
    E-mail verification mails are sent:
    a) Explicitly: when a user signs up
    b) Implicitly: when a user attempts to log in using an unverified
    e-mail while EMAIL_VERIFICATION is mandatory.

    Especially in case of b), we want to limit the number of mails
    sent (consider a user retrying a few times), which is why there is
    a cooldown period before sending a new mail. This cooldown period
    can be configured in ACCOUNT_EMAIL_CONFIRMATION_COOLDOWN setting.
    """
    from .models import EmailAddress

    adapter = get_adapter(request)

    if not email:
        email = user_email(user)
    if email:
        try:
            email_address = EmailAddress.objects.get_for_user(user, email)
            if not email_address.verified:
                # The adapter implements the cooldown check.
                send_email = adapter.should_send_confirmation_mail(
                    request, email_address
                )
                if send_email:
                    email_address.send_confirmation(request, signup=signup)
            else:
                send_email = False
        except EmailAddress.DoesNotExist:
            send_email = True
            # add_email() with confirm=True also sends the confirmation.
            email_address = EmailAddress.objects.add_email(
                request, user, email, signup=signup, confirm=True
            )
            assert email_address
        # At this point, if we were supposed to send an email we have sent it.
        if send_email:
            adapter.add_message(
                request,
                messages.INFO,
                "account/messages/email_confirmation_sent.txt",
                {"email": email},
            )
    if signup:
        # Stash the user so the signup flow can be resumed after confirmation.
        adapter.stash_user(request, user_pk_to_url_str(user))


def sync_user_email_addresses(user):
    """
    Keep user.email in sync with user.emailaddress_set.

    Under some circumstances the user.email may not have ended up as
    an EmailAddress record, e.g. in the case of manually created admin
    users.
    """
    from .models import EmailAddress

    email = user_email(user)
    if (
        email
        and not EmailAddress.objects.filter(user=user, email__iexact=email).exists()
    ):
        if (
            app_settings.UNIQUE_EMAIL
            and EmailAddress.objects.filter(email__iexact=email).exists()
        ):
            # Bail out
            return
        # get_or_create() to gracefully handle races
        EmailAddress.objects.get_or_create(
            user=user, email=email, defaults={"primary": False, "verified": False}
        )
def filter_users_by_username(*username):
    """
    Return a queryset of users whose username matches any of the given
    values — case-insensitively, or by exact lowercased match, depending
    on the PRESERVE_USERNAME_CASING setting.
    """
    username_field = app_settings.USER_MODEL_USERNAME_FIELD
    manager = get_user_model()._default_manager
    if app_settings.PRESERVE_USERNAME_CASING:
        # OR together one case-insensitive lookup per value.
        combined = Q(**{username_field + "__iexact": username[0]})
        for value in username[1:]:
            combined = combined | Q(**{username_field + "__iexact": value})
        return manager.filter(combined)
    # Usernames are stored lowercased in this mode; a plain __in works.
    return manager.filter(
        **{username_field + "__in": [u.lower() for u in username]}
    )
def filter_users_by_email(email, is_active=None):
    """Return list of users by email address

    Typically one, at most just a few in length.  First we look through
    EmailAddress table, than customisable User model table. Add results
    together avoiding SQL joins and deduplicate.
    """
    from .models import EmailAddress

    User = get_user_model()
    mails = EmailAddress.objects.filter(email__iexact=email)
    if is_active is not None:
        mails = mails.filter(user__is_active=is_active)
    users = []
    for e in mails.prefetch_related("user"):
        # __iexact is collation-dependent; re-check with the Unicode
        # TR36 comparison to avoid false matches.
        if _unicode_ci_compare(e.email, email):
            users.append(e.user)
    if app_settings.USER_MODEL_EMAIL_FIELD:
        q_dict = {app_settings.USER_MODEL_EMAIL_FIELD + "__iexact": email}
        user_qs = User.objects.filter(**q_dict)
        if is_active is not None:
            user_qs = user_qs.filter(is_active=is_active)
        for user in user_qs.iterator():
            user_email = getattr(user, app_settings.USER_MODEL_EMAIL_FIELD)
            if _unicode_ci_compare(user_email, email):
                users.append(user)
    # NOTE: set() deduplication does not preserve order.
    return list(set(users))


def passthrough_next_redirect_url(request, url, redirect_field_name):
    # Forward the request's "next" parameter onto ``url``.
    assert url.find("?") < 0  # TODO: Handle this case properly
    next_url = get_next_redirect_url(request, redirect_field_name)
    if next_url:
        url = url + "?" + urlencode({redirect_field_name: next_url})
    return url
def user_pk_to_url_str(user):
    """
    Encode ``user.pk`` as a URL-safe string: hex digest for UUID primary
    keys, base36 for integer primary keys, plain str() otherwise.
    """
    User = get_user_model()
    if issubclass(type(User._meta.pk), models.UUIDField):
        return user.pk if isinstance(user.pk, str) else user.pk.hex
    pk = user.pk
    if isinstance(pk, int):
        return int_to_base36(pk)
    return str(pk)
def url_str_to_user_pk(s):
    """
    Inverse of user_pk_to_url_str(): decode ``s`` back into a primary
    key value suitable for a user lookup.
    """
    User = get_user_model()
    # TODO: Ugh, isn't there a cleaner way to determine whether or not
    # the PK is a str-like field?
    remote_field = getattr(User._meta.pk, "remote_field", None)
    if remote_field and getattr(remote_field, "to", None):
        pk_field = User._meta.pk.remote_field.to._meta.pk
    else:
        pk_field = User._meta.pk
    if issubclass(type(pk_field), models.UUIDField):
        return pk_field.to_python(s)
    # If the field parses an arbitrary string, the pk is str-like and is
    # used as-is; otherwise it was base36-encoded by user_pk_to_url_str().
    try:
        pk_field.to_python("a")
        return s
    except ValidationError:
        return base36_to_int(s)
from __future__ import absolute_import, unicode_literals, print_function
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
import warnings
import functools
from collections import namedtuple
from .tagset import Tag
# A token is a piece of text plus its (possibly absent) analyses.
Token = namedtuple('Token', 'text annotations')
Annotation = namedtuple('Annotation', 'lex gr joined')
FlatToken = namedtuple('FlatToken', 'text lex gr joined')


def parse_xml(source):
    """
    Parse XML file ``source`` (which can be obtained from ruscorpora.ru);
    return an iterator of sentences. Each sentence is a list of Token
    instances.
    """
    tree = ElementTree.parse(source)

    def punctuation(raw):
        # Turn raw text between <w> elements into punctuation tokens:
        # one per non-empty line, carrying no annotations.
        if not raw:
            return []
        return [Token(piece, None) for piece in raw.split('\n') if piece]

    for sentence_el in tree.findall('se'):
        tokens = list(punctuation(sentence_el.text))
        for word_el in sentence_el.findall('w'):
            analyses = word_el.findall('ana')
            # The text trailing the last <ana> element is the word itself.
            word = analyses[-1].tail or ''
            tokens.append(Token(word, [
                Annotation(a.get('lex'), a.get('gr'), a.get('joined'))
                for a in analyses
            ]))
            tokens.extend(punctuation(word_el.tail))
        tokens.extend(punctuation(sentence_el.tail))
        yield [tok for tok in tokens if tok.text.strip()]
def simplify(sents, remove_accents=True, join_split=True,
             join_hyphenated=True, punct_tag='PNCT', wrap_tags=True,
             flat_tokens=True):
    """
    Simplify the result of ``sents`` parsing:

    * keep only a single annotation per word part;
    * annotate punctuation with ``punct_tag``;
    * join split words into a single token (if ``join_split==True``);
    * join hyphenated words to a single token (if ``join_hyphenated==True``);
    * remove accents (if ``remove_accents==True``);
    * convert string tag representation to ruscorpora.Tag instances
      (if ``wrap_tags==True``);
    * return tokens as FlatToken instances (if ``flat_tokens==True``).
    """
    def remove_extra_annotations(token):
        """ force token annotations to be a single-element list """
        if token.annotations is None:
            return (token.text, [None])
        return (token.text, [token.annotations[-1]])

    def _token_to_flat(token):
        # Collapse a (text, annotations) token into a single FlatToken.
        ann = token.annotations
        if ann[0] is None:
            return FlatToken(token.text, None, None, None)
        if all(a.joined == 'together' for a in ann):
            # Parts of a split word: concatenate lemmas, keep the tag of
            # the last part.
            return FlatToken(
                token.text,
                "".join(a.lex for a in ann),
                token.annotations[-1].gr,
                'together'
            )
        if len(ann) == 2 and all(a.joined == 'hyphen' for a in ann):
            ann1, ann2 = ann
            tag = ann2.gr
            # Prefer the first part's tag when the second part is a
            # particle, numeric cipher or preposition.
            if str(ann2.gr) in set(['PART', 'NUM=ciph', 'PR']):
                tag = ann1.gr
            return FlatToken(
                token.text,
                "-".join([ann1.lex, ann2.lex]),
                tag,
                'hyphen'
            )
        return FlatToken(token.text, ann[0].lex, ann[0].gr, ann[0].joined)

    def _combine_tokens(tokens):
        # Merge accumulated (text, annotations) pairs into one token,
        # dropping None annotations.
        text = "".join(t[0] for t in tokens)
        annotations = [ann for t in tokens for ann in t[1] if ann]
        return (text, annotations)

    def _join_tokens(sent, accum_size, should_accumulate):
        # Generic pass gluing adjacent tokens: accumulate while
        # ``should_accumulate`` holds and emit a combined token once
        # ``accum_size`` parts were collected; a partial accumulation is
        # flushed (with a warning) as soon as the run is interrupted.
        accum = []
        for text, annotations in sent:
            ann = annotations[0]
            if should_accumulate(text, ann, accum):
                accum.append((text, annotations))
                if len(accum) == accum_size:
                    yield _combine_tokens(accum)
                    accum = []
            else:
                if accum:
                    warnings.warn("unconsumed tokens: %s" % accum)
                    for tok in accum:
                        yield tok
                    accum = []
                yield text, annotations

    # Split words come in 2 parts, hyphenated compounds in 3
    # (first part, "-", second part).
    join_split_tokens = functools.partial(
        _join_tokens,
        accum_size=2,
        should_accumulate = lambda text, ann, accum: ann and ann.joined == 'together'
    )

    join_hyphenated_tokens = functools.partial(
        _join_tokens,
        accum_size=3,
        should_accumulate = lambda text, ann, accum: (ann and ann.joined == 'hyphen') or (accum and text.strip() == '-')
    )

    def fix_punct_tags(sent):
        # Punctuation tokens carry no annotation; give them ``punct_tag``
        # with the text itself as the lemma.
        for text, annotations in sent:
            new_annotations = []
            for ann in annotations:
                if ann is None:
                    ann = Annotation(text, punct_tag, None)
                new_annotations.append(ann)
            yield text, new_annotations

    def with_wrapped_tags(sent):
        # Replace string tags with ruscorpora.Tag instances.
        for text, annotations in sent:
            new_annotations = []
            for ann in annotations:
                new_annotations.append(ann._replace(gr=Tag(ann.gr)))
            yield text, new_annotations

    for sent in sents:
        sent = map(remove_extra_annotations, sent)
        if remove_accents:
            # Accents are marked with backticks in the source corpus.
            sent = [(t[0].replace('`', ''), t[1]) for t in sent]
        if join_split:
            sent = join_split_tokens(sent)
        if join_hyphenated:
            sent = join_hyphenated_tokens(sent)
        sent = fix_punct_tags(sent)
        if wrap_tags:
            sent = with_wrapped_tags(sent)
        sent = [Token(*t) for t in sent]
        if flat_tokens:
            sent = [_token_to_flat(t) for t in sent]
        yield sent
def parse_simple(source, **simplify_kwargs):
    """Convenience wrapper: parse ``source`` and simplify the sentences."""
    return simplify(parse_xml(source), **simplify_kwargs)


if __name__ == '__main__':
    import sys

    # Usage: python <module> <corpus.xml> — dump simplified tokens,
    # one per line, with a blank separator between sentences.
    for sentence in parse_simple(sys.argv[1]):
        for token in sentence:
            print(token)
        print("\n")
from collections import Counter
__author__ = 'sergeysmetanin'
import os.path
import pickle
import numpy as np
class MultinominalNB:
    def __init__(self, directory=None):
        # When a directory/prefix is given, restore a previously pickled
        # model (classes, priors and per-class log-probabilities); see
        # save(). Otherwise fit() must be called before use.
        if directory is not None:
            self._classes = self.__read_from_file(directory+'_classes.txt')
            self._priors = self.__read_from_file(directory+'_priors.txt')
            self._p_log = {label: self.__read_from_file(directory+'_p_log_' + label + '.txt') for label in self._classes }
            #self._stop_list = self.__read_list(directory+'_stop_words_lemmas.txt')
def fit(self, samples, labels, features, freq=None, priors=None):
self._classes = list(set(labels))
if freq is None:
freq = np.array([[sum([count for j, count in enumerate(samples[:, i]) if labels[j] == label])
for i in xrange(samples.shape[1])] for label in self._classes], dtype=np.float32)
if priors is None:
self._priors = np.array([sum([label == self._classes[i] for label in labels])
for i in xrange(len(self._classes))], dtype=np.float32) / len(labels)
else:
self._priors = priors
freq = freq.T
p_log = np.log(freq + 1) - np.log(len(features) + sum(freq))
p_log = p_log.T
self._p_log = {label: {feature: p_log[i][j] for j, feature in enumerate(features)} for i, label in
enumerate(self._classes)}
def __inverse_label(self, label):
print label
i = self._classes.index(label)
#print self._classes[(i+1) % len(self._classes)]
return self._classes[(i+1) % len(self._classes)]
def proba_log(self, x, inverse):
counter = Counter(x)
return [sum([0 if X not in self._p_log[label if not inverse[i] else self.__inverse_label(label)] else counter[X] * self._p_log[label if not inverse[i] else self.__inverse_label(label)][X] for i, X in enumerate(counter)]) for
label in self._classes]
def proba(self, x):
counter = Counter(x)
proba = np.exp(
[sum([0 if X not in self._p_log[label] else counter[X] * self._p_log[label][X] for X in counter]) for label
in self._classes])
return proba / max(proba)
def predict(self, x, inverse):
return self._classes[np.argmax(self.proba_log(x, inverse))]
def score(self, x, y):
return float(sum([self.predict(x[i]) == y[i] for i in xrange(len(x))])) / len(x)
def save(self):
self.__save_to_file('_classes.txt', self._classes)
self.__save_to_file('_priors.txt', self._priors)
for label in self._classes:
self.__save_to_file('_p_log_' + label + '.txt', self._p_log[label])
@staticmethod
def __save_to_file(file_name, d):
output = open(file_name, 'w')
pickle.dump(d, output)
output.close()
@staticmethod
def __read_from_file(file_name):
pkl_file = open(os.path.dirname(__file__) +'/'+file_name, 'r')
d = pickle.load(pkl_file)
pkl_file.close()
return d
@staticmethod
def __read_list(file_name):
list_file = open(os.path.dirname(__file__) +'/'+file_name, 'r')
return [word.replace('\n','') for word in list_file] | /rusentiment-0.0.3.tar.gz/rusentiment-0.0.3/clf/classifier.py | 0.4436 | 0.20951 | classifier.py | pypi |
import re
import logging
from typing import Set, Tuple, List
# A sentence candidate: a maximal run of non-terminators, followed by optional
# terminators (. ? ! …) and optional closing quotes.
SENT_RE = re.compile(r'[^\.?!…]+[\.?!…]*["»“]*')
# Last Latin/Cyrillic word right before a trailing dot.
_LAST_WORD = re.compile(r'(?:\b|\d)([a-zа-я]+)\.$', re.IGNORECASE)
# First word of the following fragment.
_FIRST_WORD = re.compile(r'^\W*(\w+)')
# Single Latin letter + dot at the end (units, initials such as "p.").
_ENDS_WITH_ONE_LETTER_LAT_AND_DOT = re.compile(r'(\d|\W|\b)([a-zA-Z])\.$')
# Word.word. at the end — an abbreviation with an internal dot, e.g. "т.к.".
_HAS_DOT_INSIDE = re.compile(r'[\w]+\.[\w]+\.$', re.IGNORECASE)
# A single capital initial followed by a dot, e.g. "И.".
_INITIALS = re.compile(r'(\W|\b)([A-ZА-Я]{1})\.$')
# 1-4 Russian consonants only — typical of shortened words like "др".
_ONLY_RUS_CONSONANTS = re.compile(r'^[бвгджзйклмнпрстфхцчшщ]{1,4}$', re.IGNORECASE)
# The right-hand fragment begins with whitespace.
_STARTS_WITH_EMPTYNESS = re.compile(r'^\s+')
# Emotional / ellipsis ending, optionally followed by a closing quote.
_ENDS_WITH_EMOTION = re.compile(r'[!?…]|\.{2,}\s?[)"«»,“]?$')
# Fragment starts with a lowercase letter (optionally after a dash/quote).
_STARTS_WITH_LOWER = re.compile(r'^\s*[–-—-("«]?\s*[a-zа-я]')
# Fragment starts with a digit.
_STARTS_WITH_DIGIT = re.compile(r'^\s*\d')
# Bare list numeration such as "IV." or "12.".
_NUMERATION = re.compile(r'^\W*[IVXMCL\d]+\.$')
# Two consecutive one-word shortenings at the end, e.g. "т. е.".
_PAIRED_SHORTENING_IN_THE_END = re.compile(r'\b(\w+)\. (\w+)\.\W*$')
# Decision codes returned by _is_sentence_end.
_JOIN = 0  # definitely not a sentence boundary
_MAYBE = 1  # boundary only if the next fragment looks like a new sentence
_SPLIT = 2  # definitely a sentence boundary
# Shortenings that always glue the following fragment to the current sentence.
JOINING_SHORTENINGS = {'mr', 'mrs', 'ms', 'dr', 'vs', 'англ', 'итал', 'греч', 'евр', 'араб', 'яп', 'слав', 'кит',
                       'тел', 'св', 'ул', 'устар', 'им', 'г', 'см', 'д', 'стр', 'корп', 'пл', 'пер', 'сокр', 'рис'}
# Shortenings that may or may not end a sentence (resolved via _MAYBE).
SHORTENINGS = {'co', 'corp', 'inc', 'авт', 'адм', 'барр', 'внутр', 'га', 'дифф', 'дол', 'долл', 'зав', 'зам', 'искл',
               'коп', 'корп', 'куб', 'лат', 'мин', 'о', 'обл', 'обр', 'прим', 'проц', 'р', 'ред', 'руб', 'рус', 'русск',
               'сан', 'сек', 'тыс', 'эт', 'яз', 'гос', 'мн', 'жен', 'муж', 'накл', 'повел', 'букв', 'шутл', 'ед'}
# Two-part shortenings written as "X. Y." (e.g. "т. е.").
PAIRED_SHORTENINGS = {('и', 'о'), ('т', 'е'), ('т', 'п'), ('у', 'е'), ('н', 'э')}
def _regex_split_separators(text: str) -> List[str]:
    """Split *text* into stripped sentence-candidate fragments via SENT_RE.

    Bug fix: the return annotation was the list literal ``[str]`` (a list
    containing the ``str`` class) instead of ``typing.List[str]``.
    """
    return [x.strip() for x in SENT_RE.findall(text)]
def _is_sentence_end(left: str, right: str,
                     shortenings: Set[str],
                     joining_shortenings: Set[str],
                     paired_shortenings: Set[Tuple[str, str]]) -> int:
    """Decide whether the gap between *left* and *right* is a sentence boundary.

    Returns _JOIN (not a boundary), _MAYBE (boundary unless the next fragment
    continues the sentence) or _SPLIT (certain boundary). The checks below
    are order-sensitive: earlier, stronger rules short-circuit later ones.
    """
    # No whitespace right after the terminator -> inside a token ("2.5", URL).
    if not _STARTS_WITH_EMPTYNESS.match(right):
        return _JOIN
    # Trailing abbreviation with internal dots ("т.к.") never ends a sentence.
    if _HAS_DOT_INSIDE.search(left):
        return _JOIN
    left_last_word = _LAST_WORD.search(left)
    lw = ' '
    if left_last_word:
        lw = left_last_word.group(1)
    if lw.lower() in joining_shortenings:
        return _JOIN
    # A short consonant-only lowercase word is likely an abbreviation.
    if _ONLY_RUS_CONSONANTS.search(lw) and lw[-1].islower():
        return _MAYBE
    pse = _PAIRED_SHORTENING_IN_THE_END.search(left)
    if pse:
        s1, s2 = pse.groups()
        if (s1, s2) in paired_shortenings:
            return _MAYBE
    right_first_word = _FIRST_WORD.match(right)
    if right_first_word:
        rw = right_first_word.group(1)
        # A paired shortening split across the boundary, e.g. "т." | "е.".
        if (lw, rw) in paired_shortenings:
            return _MAYBE
    # "…!" followed by a lowercase continuation is direct speech, not an end.
    if _ENDS_WITH_EMOTION.search(left) and _STARTS_WITH_LOWER.match(right):
        return _JOIN
    initials = _INITIALS.search(left)
    if initials:
        border, _ = initials.groups()
        # Initials ("И.") glue to the following name; degree/prime symbols
        # before the letter are treated as measurements instead.
        if (border or ' ') not in "°'":
            return _JOIN
    if lw.lower() in shortenings:
        return _MAYBE
    last_letter = _ENDS_WITH_ONE_LETTER_LAT_AND_DOT.search(left)
    if last_letter:
        border, _ = last_letter.groups()
        if (border or ' ') not in "°'":
            return _MAYBE
    # Bare numeration such as "IV." opens a list item, not a sentence end.
    if _NUMERATION.match(left):
        return _JOIN
    return _SPLIT
def ru_sent_tokenize(text: str,
                     shortenings: Set[str] = SHORTENINGS,
                     joining_shortenings: Set[str] = JOINING_SHORTENINGS,
                     paired_shortenings: Set[Tuple[str, str]] = PAIRED_SHORTENINGS) -> List[str]:
    """Split Russian *text* into a list of sentences.

    Candidate fragments come from SENT_RE; each candidate boundary is then
    accepted or rejected by the _is_sentence_end heuristics. The default
    shortening sets are module-level constants and are never mutated here.
    """
    sentences = []
    sents = _regex_split_separators(text)
    si = 0
    processed_index = 0  # absolute offset in *text* scanned so far
    sent_start = 0  # absolute start of the sentence currently accumulating
    while si < len(sents):
        s = sents[si]
        # Re-locate the (stripped) fragment in the original text so that
        # span_start/span_end are exact offsets including any whitespace.
        span_start = text[processed_index:].index(s) + processed_index
        span_end = span_start + len(s)
        processed_index += len(s)
        si += 1
        send = _is_sentence_end(text[sent_start: span_end], text[span_end:],
                                shortenings, joining_shortenings, paired_shortenings)
        if send == _JOIN:
            continue
        if send == _MAYBE:
            # Ambiguous: only split when the next fragment looks like the
            # start of a new sentence (not lowercase, not a digit).
            if _STARTS_WITH_LOWER.match(text[span_end:]):
                continue
            if _STARTS_WITH_DIGIT.match(text[span_end:]):
                continue
        if not text[sent_start: span_end].strip():
            logging.warning("Something went wrong while tokenizing")
        sentences.append(text[sent_start: span_end].strip())
        sent_start = span_end
        processed_index = span_end
    # Flush whatever remains after the last accepted boundary.
    if sent_start != len(text):
        if text[sent_start:].strip():
            sentences.append(text[sent_start:].strip())
    return sentences
if __name__ == '__main__':
    # Smoke tests, executed only when the module is run as a script.
    # Fixes: removed a stray dataset-metadata artifact fused onto the last
    # line, and configured logging so the final confirmation is visible
    # (logging.info is silent under the default WARNING root level).
    logging.basicConfig(level=logging.INFO)
    assert ru_sent_tokenize('купил за 5 руб. и остался доволен.') == ['купил за 5 руб. и остался доволен.']
    assert ru_sent_tokenize('Я ему сказал и т.к. он не послушался за 500р.') == ['Я ему сказал и т.к. он не послушался за 500р.']
    assert ru_sent_tokenize('Ура. Ура. 500р.') == ['Ура.', 'Ура.', '500р.']
    assert ru_sent_tokenize('Среди других её представителей — Л. Р. Зиндер, Л. В. Бондарко, М. И. Матусевич.') == \
        ['Среди других её представителей — Л. Р. Зиндер, Л. В. Бондарко, М. И. Матусевич.']
    assert ru_sent_tokenize('И. П. Павлов.') == ['И. П. Павлов.']
    assert ru_sent_tokenize('Павлов И. П., Сеченов И. М.') == ['Павлов И. П., Сеченов И. М.']
    assert ru_sent_tokenize('Основателем школы является Л. В. Щерба.') == ['Основателем школы является Л. В. Щерба.']
    assert ru_sent_tokenize('Я ему сказале: "Чтобы ничего не трогале." Но он не послушался.') == \
        ['Я ему сказале: "Чтобы ничего не трогале."', 'Но он не послушался.']
    assert ru_sent_tokenize('Нефть за $27/барр. не снится.') == ['Нефть за $27/барр. не снится.']
    assert ru_sent_tokenize('Сказала я. Он оглянулся.') == ['Сказала я.', 'Он оглянулся.']
    assert ru_sent_tokenize(
        'Летописец Нестор относит их возникновение к I столетию н.э., когда св. Андрей, проповедуя в Киеве '
        'евангельское слово, отправился потом в Новгород, где он увидел чудо – парившихся в бане.') == \
        ['Летописец Нестор относит их возникновение к I столетию н.э., когда св. Андрей, проповедуя в Киеве евангельское слово, отправился потом в Новгород, где он увидел чудо – парившихся в бане.']
    assert ru_sent_tokenize(
        '- Ну, хорошо, хочешь, я тебе тоннели покажу? - спрашивает наконец Мариам и сворачивает с одной ничем не примечательной улицы, застроенной обычными городскими многоэтажками, на другую точно такую же.') == ['- Ну, хорошо, хочешь, я тебе тоннели покажу? - спрашивает наконец Мариам и сворачивает с одной ничем не примечательной улицы, застроенной обычными городскими многоэтажками, на другую точно такую же.']
    assert ru_sent_tokenize('Где они были эти …адцать лет?') == ['Где они были эти …адцать лет?']
    assert ru_sent_tokenize('Православие... более всего подходит на роль такой идеи...') == ['Православие... более всего подходит на роль такой идеи...']
    assert ru_sent_tokenize('Yolka стоит 2400р. без трех копеек сто долларов, между прочим.') == ['Yolka стоит 2400р. без трех копеек сто долларов, между прочим.']
    assert ru_sent_tokenize(
        'А если лень читать всё - общее количество ответов: 8272! - то можно почитать книжку избранных мест.') == ['А если лень читать всё - общее количество ответов: 8272! - то можно почитать книжку избранных мест.']
    assert ru_sent_tokenize('Это стоило 50 т. к. вчера') == ['Это стоило 50 т. к. вчера']
    assert ru_sent_tokenize(
        "Официально закрытие фастфудов назначено на предстоящее воскресенье, причём менеджеры не планируют снова открывать в этой стране рестораны McDonald's. Причин закрытия исландских McDonald's несколько.") == \
        ["Официально закрытие фастфудов назначено на предстоящее воскресенье, причём менеджеры не планируют снова открывать в этой стране рестораны McDonald's.",
         "Причин закрытия исландских McDonald's несколько."]
    assert ru_sent_tokenize(
        '12 января ожидается понижение до минус 44 — минус 48°C. В школах региона отменены занятия в начальных классах.') == \
        ['12 января ожидается понижение до минус 44 — минус 48°C.',
         'В школах региона отменены занятия в начальных классах.']
    assert ru_sent_tokenize(
        'У государственных людей тоже есть дети, и если для них ночные заведения работать-таки будут… (а вы попробуйте им отказать) ну, в общем, не хотелось бы думать о волне народного возмущения.') == \
        ['У государственных людей тоже есть дети, и если для них ночные заведения работать-таки будут… (а вы попробуйте им отказать) ну, в общем, не хотелось бы думать о волне народного возмущения.']
    assert ru_sent_tokenize(
        'По сравнению с 2009 годом Россия опустилась в рейтинге на 9 позиций (т. е. ситуация в ней улучшилась).') == \
        ['По сравнению с 2009 годом Россия опустилась в рейтинге на 9 позиций (т. е. ситуация в ней улучшилась).']
    assert ru_sent_tokenize('Привет.Тест (прим. ред. - на токенизацию). \"По моим наблюдениям, выгоднее было бы поставлять газ в Азию через Аравийское море. Проект иранской газовой трубы через Турцию — долгосрочная перспектива\", - сказал Сайед Мухаммед Хусейн Адели.') == \
        ['Привет.Тест (прим. ред. - на токенизацию).',
         '"По моим наблюдениям, выгоднее было бы поставлять газ в Азию через Аравийское море.',
         'Проект иранской газовой трубы через Турцию — долгосрочная перспектива", - сказал Сайед Мухаммед Хусейн Адели.']
    logging.info('all tests passed!')
import sys
from collections import OrderedDict
import click
from rush_cli.read_tasks import ReadTasks
from rush_cli.utils import beautify_task_cmd
from rush_cli.utils import beautify_task_name
from rush_cli.utils import scream
class PrepTasks(ReadTasks):
    """Preprocess rushfile tasks (dependency expansion, flattening and
    filtering) before they are executed."""

    def __init__(self, *args, no_deps=False, **kwargs):
        super().__init__(**kwargs)
        # Task names the user explicitly selected; empty means "all tasks".
        self.filter_names = args
        # When True, references to other tasks are dropped instead of inlined.
        self.no_deps = no_deps

    @staticmethod
    def _clean_tasks(yml_content):
        """Split each stringified task chunk into a list of command lines."""
        cleaned_tasks = OrderedDict()
        for task_name, task_chunk in yml_content.items():
            if task_chunk:
                cleaned_tasks[task_name] = task_chunk.rstrip().split("\n")
            else:
                # Preserve empty tasks so they still appear downstream.
                cleaned_tasks[task_name] = ""
        return cleaned_tasks

    def _replace_placeholder_tasks(self, task_chunk: list, cleaned_tasks: dict) -> list:
        """Recursively replace dependent task names with the actual commands."""
        for idx, task in enumerate(task_chunk):
            if isinstance(task, str):
                if task in cleaned_tasks:
                    # A line naming another task: inline it, or drop it when
                    # dependency execution is disabled.
                    task_chunk[idx] = cleaned_tasks[task] if not self.no_deps else ""
            else:
                # Bug fix: the original recursed via the unbound method
                # ``PrepTasks._replace_placeholder_tasks(task, cleaned_tasks)``,
                # omitting the instance and raising TypeError (and losing
                # ``self.no_deps``) for nested chunks.
                task_chunk[idx] = self._replace_placeholder_tasks(task, cleaned_tasks)
        return task_chunk

    @classmethod
    def _flatten_task_chunk(cls, nested_task_chunk: list) -> list:
        """Recursively flatten a nested task list into a flat list of lines."""
        flat_task_chunk = []
        for elem in nested_task_chunk:
            if isinstance(elem, list):
                flat_task_chunk.extend(cls._flatten_task_chunk(elem))
            else:
                flat_task_chunk.append(elem)
        return flat_task_chunk

    @staticmethod
    def _filter_tasks(cleaned_tasks: dict, *filter_names) -> dict:
        """Keep only the tasks the user selected; exit on unknown names."""
        if not filter_names:
            return cleaned_tasks
        try:
            return {k: cleaned_tasks[k] for k in filter_names}
        except KeyError:
            not_found_tasks = [k for k in filter_names if k not in cleaned_tasks]
            click.secho(
                f"Error: Tasks {not_found_tasks} do not exist.", fg="magenta"
            )
            sys.exit(1)

    def get_prepared_tasks(self):
        """Return the fully expanded, flattened and filtered task dict."""
        yml_content = super().read_rushfile()
        cleaned_tasks = self._clean_tasks(yml_content)
        # Replace placeholders, then flatten each chunk back into one string.
        for task_name, task_chunk in cleaned_tasks.items():
            task_chunk = self._replace_placeholder_tasks(task_chunk, cleaned_tasks)
            task_chunk = self._flatten_task_chunk(task_chunk)
            cleaned_tasks[task_name] = "\n".join(task_chunk)
        # Apply the user's task selection last.
        return self._filter_tasks(cleaned_tasks, *self.filter_names)
class Views(PrepTasks):
    """View ad hoc tasks (read-only inspection of the rushfile)."""

    def __init__(self, *args, **kwargs):
        # PrepTasks.__init__ already stores *args as self.filter_names; the
        # original redundantly re-assigned it here.
        super().__init__(*args, **kwargs)

    @property
    def view_rushpath(self):
        """Print the path of the rushfile in use."""
        rushfile_path = self.find_rushfile()
        click.secho(rushfile_path, fg="cyan")

    @property
    def view_tasks(self):
        """Pretty-print every (filtered) task together with its commands."""
        cleaned_tasks = self.get_prepared_tasks()
        scream(what="view")
        for k, v in cleaned_tasks.items():
            beautify_task_name(k)
            beautify_task_cmd(v)

    @property
    def view_tasklist(self):
        """Print each task name followed by the tasks it depends on."""
        deps = self._prep_deps()
        scream(what="list")
        click.echo()
        for k, v in deps.items():
            click.secho("-" + " " + k, fg="yellow")
            for cmd in v:
                click.echo(" " * 2 + "-" + " " + cmd)

    def _prep_deps(self):
        """Build ``{task_name: [dependency task names]}`` from the rushfile."""
        # Raw rushfile as a dict; drop empty tasks, split chunks into lines.
        yml_content = self.read_rushfile()
        yml_content = {k: v.split("\n") for k, v in yml_content.items() if v}
        # A dependency is any command line that names another task.
        deps = {k: [cmd for cmd in v if cmd in yml_content]
                for k, v in yml_content.items()}
        # Honor the user's task selection.
        return self._filter_tasks(deps, *self.filter_names)
import asyncio
import functools
import inspect
import time
import typing
import attr
from rush import exceptions
from rush import result
from rush import throttle as _throttle
@attr.s
class ThrottleDecorator:
    """The class that acts as a decorator used to throttle function calls.
    This class requires an instantiated throttle with which to limit function
    invocations.
    .. attribute:: throttle
        The :class:`~rush.throttle.Throttle` which should be used to limit
        decorated functions.
    """

    throttle: _throttle.Throttle = attr.ib()

    def _check(self, key: str) -> result.RateLimitResult:
        """Consume one unit of rate-limit for *key*; raise when limited."""
        # NOTE(review): this local deliberately shadows the imported
        # ``result`` module within the method body; the annotation above
        # still resolves to the module.
        result = self.throttle.check(key=key, quantity=1)
        if result.limited:
            raise ThrottleExceeded("Rate-limit exceeded", result=result)
        return result

    def __call__(self, func: typing.Callable) -> typing.Callable:
        """Wrap a function with a Throttle.
        :param callable func:
            The function to decorate.
        :return:
            Decorated function.
        :rtype:
            :class:`~typing.Callable`
        """
        # The rate-limit key is the bare function name, so two decorated
        # functions sharing a __name__ share the same bucket.
        key = func.__name__
        # Coroutine functions get an async wrapper so awaiting still works.
        if inspect.iscoroutinefunction(func):
            @functools.wraps(func)
            async def wrapper(*args, **kwargs) -> typing.Callable:
                """Throttle the decorated function.
                Extend the behaviour of the decorated function, forwarding
                function calls if the throttle allows. The decorator will
                raise an exception if the function cannot be called so the
                caller may implement a retry strategy.
                :param args:
                    non-keyword arguments to pass to the decorated function.
                :param kwargs:
                    keyworded arguments to pass to the decorated function.
                :raises:
                    `~rush.contrib.decorator.ThrottleExceeded`
                """
                self._check(key=key)
                return await func(*args, **kwargs)
        else:
            @functools.wraps(func)
            def wrapper(*args, **kwargs) -> typing.Callable:
                """Throttle the decorated function.
                Extend the behaviour of the decorated function, forwarding
                function calls if the throttle allows. The decorator will
                raise an exception if the function cannot be called so the
                caller may implement a retry strategy.
                :param args:
                    non-keyword arguments to pass to the decorated function.
                :param kwargs:
                    keyworded arguments to pass to the decorated function.
                :raises:
                    `~rush.contrib.decorator.ThrottleExceeded`
                """
                self._check(key=key)
                return func(*args, **kwargs)
        return wrapper

    def sleep_and_retry(self, func: typing.Callable) -> typing.Callable:
        """Wrap function with a naive sleep and retry strategy.
        :param Callable func:
            The :class:`~typing.Callable` to decorate.
        :return:
            Decorated function.
        :rtype:
            :class:`~typing.Callable`
        """
        # Reuse __call__ so the same per-name key and check logic apply.
        throttled_func = self(func)
        if inspect.iscoroutinefunction(func):
            @functools.wraps(func)
            async def wrapper(*args, **kwargs) -> typing.Callable:
                """Perform naive sleep and retry strategy.
                Call the throttled function. If the function raises a
                ``ThrottleExceeded`` exception sleep for the recommended
                time and retry.
                :param args:
                    non-keyword arguments to pass to the decorated function.
                :param kwargs:
                    keyworded arguments to pass to the decorated function.
                """
                while True:
                    try:
                        return await throttled_func(*args, **kwargs)
                    except ThrottleExceeded as e:
                        # Non-blocking sleep for the throttle's advised delay.
                        await asyncio.sleep(
                            e.result.retry_after.total_seconds()
                        )
        else:
            @functools.wraps(func)
            def wrapper(*args, **kwargs) -> typing.Callable:  # type: ignore
                """Perform naive sleep and retry strategy.
                Call the throttled function. If the function raises a
                ``ThrottleExceeded`` exception sleep for the recommended
                time and retry.
                :param args:
                    non-keyword arguments to pass to the decorated function.
                :param kwargs:
                    keyworded arguments to pass to the decorated function.
                """
                while True:
                    try:
                        return throttled_func(*args, **kwargs)
                    except ThrottleExceeded as e:
                        # Blocking sleep for the throttle's advised delay.
                        time.sleep(e.result.retry_after.total_seconds())
        return wrapper
class ThrottleExceeded(exceptions.RushError):
    """The rate-limit has been exceeded.

    (Fix: removed a stray dataset-metadata artifact fused onto the last line.)
    """

    def __init__(self, message, *, result: result.RateLimitResult) -> None:
        """Store the rate-limit result so callers can inspect e.g. retry_after."""
        super().__init__(message)
        self.result = result
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Returns:
            float: mean of the data set
        """
        # 1.0 * keeps the division in float space even under Python 2.
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): True applies Bessel's correction (n - 1) for
                sample data; False treats the data as a full population.

        Returns:
            float: standard deviation of the data set
        """
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Plot a histogram of the instance data using matplotlib."""
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized data histogram and the fitted pdf on shared x.

        Args:
            n_spaces (int): number of points used to sample the pdf

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)
        # Interval between successive pdf sample points.
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this label belongs to the lower axes; the original set
        # axes[0]'s ylabel a second time and left axes[1] unlabeled.
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: sum distribution (means add; variances add)
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Return a human-readable summary of the Gaussian instance."""
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import logging
from typing import Any, Dict, List, Optional, TypedDict, Union
import requests
logger = logging.getLogger(__name__)
class RushmoreResponse(TypedDict):
    """Shape of the top-level payload returned by the Rushmore wells API."""

    # NOTE(review): every key is declared Optional (possibly None); whether
    # the API can actually omit or null these is not verifiable here.
    TotalWells: Optional[int]
    TotalPages: Optional[int]  # drives pagination in get_data
    PageInfo: Optional[Dict[str, Any]]
    Data: Optional[List[Any]]  # the actual well rows; merged across pages
class RushmoreReport:
    """One Rushmore report (performance review) endpoint.

    Thin wrapper that remembers the report name, API key and page size and
    delegates the actual extraction to :func:`get_data`.
    """

    def __init__(self, report_name: str, api_key: str, page_size: Optional[int] = 1000):
        self.api_key = api_key
        self.report_name = report_name
        self._page_size = page_size

    @property
    def page_size(self):
        """Number of rows requested per API page."""
        return self._page_size

    @page_size.setter
    def page_size(self, value):
        # Bug fix: check the type first. The original evaluated ``value > 0``
        # before ``isinstance``, so a non-numeric value (e.g. "10") raised a
        # bare comparison TypeError instead of the descriptive one below.
        if not isinstance(value, int):
            raise TypeError("Incorrect type. Specify a positive integer for page size.")
        if value <= 0:
            raise ValueError(
                "Incorrect value. Specify a positive integer for page size."
            )
        self._page_size = value

    def get(
        self,
        data_filter: Optional[str] = None,
        full_response: Optional[bool] = True,
        max_pages: Optional[int] = None,
    ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
        """Retrieve all raw data from the relevant Rushmore Review.

        Args:
            data_filter: Filtering string according to the API specification.
            full_response: True returns the full API payload; False returns
                only the well-data list component.
            max_pages: Optional cap on the number of pages retrieved, for
                testing purposes.

        Returns:
            Full payload dict, or a list of dicts (one per review entry).
        """
        return get_data(
            api_key=self.api_key,
            report_name=self.report_name,
            full_response=full_response,
            page_size=self.page_size,
            data_filter=data_filter,
            max_pages=max_pages,
        )
def get_data_page(
    api_key: str,
    report_name: str,
    page_size: int,
    api_version: Optional[str] = "0.1",
    page: Optional[int] = 1,
    data_filter: Optional[str] = None,
) -> RushmoreResponse:
    """Fetch a single page of rows for one Rushmore report.

    Args:
        api_key: value sent as the ``X-API-key`` header.
        report_name: report endpoint name.
        page_size: number of rows requested for this page.
        api_version: API version segment of the URL.
        page: 1-based page number to request.
        data_filter: optional filter expression appended to the query.

    Returns:
        The JSON payload of the page as a dict.

    Raises:
        requests.HTTPError: on any non-2xx response.
    """
    request_url = (
        f"https://data-api.rushmorereviews.com/v{api_version}"
        f"/wells/{report_name}?page={page}&pageSize={page_size}"
    )
    if data_filter:
        request_url += f"&filter={data_filter}"
    # Rushmore authenticates via the X-API-key header.
    page_response = requests.get(url=request_url, headers={"X-API-key": api_key})
    page_response.raise_for_status()
    return page_response.json()
def _check_response(response: Dict[str, Any]) -> None:
    """Raise if the Rushmore payload carries a fault or error marker.

    Args:
        response: decoded Rushmore API response.

    Raises:
        ValueError: when the page size caused the response to overflow.
        Exception: for any other API-reported fault or error.
    """
    logger.debug("Checking response for error messages.")
    if "fault" in response:
        fault_text: str = response["fault"]["faultstring"]
        if fault_text == "Body buffer overflow":
            raise ValueError("Response too large. Reduce page size.")
        raise Exception(f"Error was thrown: {fault_text}.")
    if "error" in response:
        raise Exception(f"Error was thrown: {response['error_description']}.")
def get_data(
    api_key: str,
    report_name: str,
    full_response: Optional[bool] = True,
    page_size: Optional[int] = 1000,
    data_filter: Optional[str] = None,
    max_pages: Optional[int] = None,
) -> Union[RushmoreResponse, List[Dict[str, Any]]]:
    """Query all data for a Rushmore report, page by page.

    Fixes: removed a stray dataset-metadata artifact fused onto the final
    line; the first-page check now tests ``output is None`` instead of dict
    truthiness.

    Args:
        api_key: Rushmore X-API-key.
        report_name: name of the performance review to query.
        full_response: True returns the whole merged API payload; False
            returns only the accumulated "Data" list.
        page_size: rows requested per page.
        data_filter: well-formed filter string per the Rushmore API spec,
            passed through to the API.
        max_pages: optional cap on the number of pages fetched (testing).

    Returns:
        The merged payload dict, or just its "Data" list.

    Raises:
        ValueError: if the page size makes a response overflow.
        Exception: for other API-reported errors.
    """
    output: Optional[RushmoreResponse] = None
    page = 1
    while True:
        logger.info(f"Fetching page {page} from {report_name.upper()}")
        response = get_data_page(
            api_key=api_key,
            report_name=report_name,
            page_size=page_size,
            page=page,
            data_filter=data_filter,
        )
        # Raises on error/failure payloads before we touch response["Data"].
        _check_response(response)
        logger.info(f"Fetched {len(response['Data'])} rows.")
        if output is None:
            # First page: keep the whole payload as the accumulator.
            output = response
        else:
            output["Data"].extend(response["Data"])
        # Determine how many pages to fetch in total.
        # TODO: Revise logic if lightweight API calls become available.
        if max_pages:
            num_pages = min(max_pages, response["TotalPages"])
        else:
            num_pages = response["TotalPages"]
        if page >= num_pages:
            logger.info(f"Extraction complete. {len(output['Data']):,} rows fetched.")
            return output if full_response else output["Data"]
        page += 1
from typing import Optional, Tuple
def rig_type(arg: str) -> Tuple[str, bool]:
    """Translate a Rushmore rig-type designation into display text.

    Returns:
        Tuple of (rig-type text, dual-action flag). The flag reports whether
        "(2)" appears anywhere in the designation — relevant for the
        Drilling Performance Review.

    Raises:
        ValueError: when the designation contains no known rig code.
    """
    code_to_text = {
        # Land rigs
        "LA": "Land rig (Rented)",
        "LO": "Land rig (Owned)",
        "HR": "Heli-rig",
        "OL": "Other land rig",
        # Shallow barge rigs
        "SU": "Submersible",
        "BA": "Barge",
        # Offshore rigs - fixed
        "HP": "Hydraulic workover unit",
        "PL": "Platform - rigid leg",
        "PT": "Platform - tethered leg",
        "PS": "Platform - spar",
        "SP": "Permanently moored",
        # Offshore rigs - mobile
        "JK": "Jack-up",
        "JP": "Jack-up over platform",
        "TB": "Platform tender-assisted barge",
        "TS": "Platform tender-assisted semi-sub",
        "TJ": "Platform tender-assisted jack-up",
        "DS": "Drillship",
        "SS": "Semi-submersible",
    }
    if not arg:
        return ("N/A", False)

    found = {code for code in code_to_text if code in arg}
    if not found:
        raise ValueError(f"Rig type designation '{arg}' is unknown.")

    if len(found) == 1:
        (single,) = found
        text = code_to_text[single]
    else:
        # A tender-assisted code alongside its base rig collapses to the base.
        base = {
            frozenset({"SS", "TS"}): "SS",
            frozenset({"BA", "TB"}): "BA",
            frozenset({"JK", "TJ"}): "JK",
        }.get(frozenset(found))
        text = code_to_text[base] if base else "Several"

    # "(2)" anywhere in the designation marks a dual-action rig.
    return (text, "(2)" in arg)
def hole_type(arg: str) -> Optional[str]:
    """Convert a Rushmore hole-type code (case-insensitive) to display text.

    Returns None for unrecognized codes.
    """
    return {
        "N": "New well",
        "G": "Geological sidetrack",
        "S": "Slot recovery",
        "O": "Other",
    }.get(arg.upper())
def well_type(arg: str) -> Optional[str]:
    """Convert a Rushmore well-type code (case-insensitive) to display text.

    Returns None for unrecognized codes.
    (Fix: removed a stray dataset-metadata artifact fused onto the last line.)
    """
    well_types = {
        "E": "Exploration",
        "D": "Development",
        "A": "Appraisal",
    }
    return well_types.get(arg.upper(), None)
import base64
import json
import struct
from typing import Dict, List, Optional
from russel_python_interface.basic_routines import StandardRoutine
from russel_python_interface.routine import Routine
class TaskSet:
    """A bundle of routine data submitted to the russel daemon.

    Bug fix: the original declared all mutable containers as *class*
    attributes, so every TaskSet shared (and polluted) the same dicts and
    lists — ``create_task_set`` appended into the class-level ``data`` and
    ``unset_variables``. State is now initialized per instance. Annotations
    referring to project types are stringified so the class is importable
    regardless of definition order.
    """

    def __init__(self) -> None:
        # Id that this task set gets from the russel daemon, per engine.
        self.my_task_id: Dict[str, int] = {}
        self.routine: "Routine" = None
        # Values of the variables that are already defined.
        self.data: List[float] = []
        # Variables that will be defined later, per task.
        self.unset_variables: List[int] = []
        # Variables whose values will be sent back.
        self.required_variables: List[int] = []
        self.usable: bool = False

    @staticmethod
    def create_task_set(data: Dict[int, List[float]], template: "StandardRoutine") -> "TaskSet":
        """Build a TaskSet from a routine template plus pre-set variable data."""
        task = TaskSet()
        task.routine = Routine.create_from_template(template)
        task.required_variables = template.return_vars
        set_variables: List[int] = []
        for key in data:
            set_variables.append(key)
            task.data += data[key]
        # Everything the template declares but the caller did not set.
        for key in range(template.variable_counter):
            if key not in set_variables:
                task.unset_variables.append(key)
        return task

    def serialize(self) -> dict:
        """Serialize to a JSON-compatible dict.

        Floats are packed as native-byte-order 32-bit values ("f" format)
        and base64-encoded into the "data" field.
        """
        packed = b"".join(struct.pack("f", value) for value in self.data)
        return {
            "routine_name": self.routine.name,
            "required_vars": self.required_variables,
            "unset_variables": self.unset_variables,
            "data": base64.b64encode(packed).decode(),
        }
class TaskSetTask:
    """One concrete task of a task set, carrying the late-bound variable data.

    Bug fixes: mutable state was previously shared between instances via
    class attributes (now per-instance), and a stray dataset-metadata
    artifact fused onto the last line was removed.
    """

    def __init__(self) -> None:
        # Task id assigned per engine.
        self.my_task_id: Dict[str, int] = {}
        self.data: List[float] = []
        self.solved: bool = False
        # Decoded result vectors, keyed by variable index.
        self.response: Dict[int, List[float]] = {}
        self.done: bool = False

    def serialize(self, engine_id: str) -> Optional[dict]:
        """Serialize for *engine_id*; None when that engine has no task id."""
        if engine_id not in self.my_task_id:
            return None
        packed = b"".join(struct.pack("f", value) for value in self.data)
        return {
            "task_set_id": self.my_task_id[engine_id],
            "data": base64.b64encode(packed).decode(),
        }

    def encode_data(self, key: int, data: str) -> None:
        """Decode a base64 payload of 32-bit floats into self.response[key]."""
        decoded = base64.b64decode(data)
        # unpack_from avoids per-element byte slicing; any trailing bytes
        # beyond a multiple of four are ignored, matching the original's
        # truncating division.
        self.response[key] = [
            struct.unpack_from("f", decoded, offset)[0]
            for offset in range(0, 4 * (len(decoded) // 4), 4)
        ]

    def set_done(self) -> None:
        self.done = True

    def got_response(self) -> bool:
        return self.done

    def get_size(self) -> int:
        """Byte/character size of the serialized JSON for any known engine."""
        engine_id = next(iter(self.my_task_id))
        return len(json.dumps(self.serialize(engine_id)))
import time
from _thread import start_new_thread
from typing import List, Dict
from uuid import uuid4
import russel_python_interface.static_vars
from russel_python_interface.basic_routines import StandardRoutine, MatrixScalarProd, MatrixVectorMulti, MatrixSum
from russel_python_interface.benchmark import BenchmarkData
from russel_python_interface.network_socket import Socket
from russel_python_interface.routine import Routine
from russel_python_interface.task import Task
from russel_python_interface.task_sets import TaskSet, TaskSetTask
class Engine:
    """Client handle for one russel engine process.

    Communicates over a Socket (unix domain or TCP), runs a background
    receiving loop, and tracks in-flight tasks and request tokens.
    """
    unix_path: str = ""
    host: str = ""
    port: int = 0
    local: bool = True
    running: bool = False
    socket: Socket = None
    # The containers below are re-created per instance in __init__; as
    # plain class attributes they were shared by every Engine instance.
    result_set: Dict[int, Task] = {}
    pending_unassigned_task: Dict[str, Task] = {}
    returned_requests: Dict[str, dict] = {}
    waiting_tokens: List[str] = []  # Messages with tokens that are not contained in this list will be deleted
    task_respond_func = None
    delete_from_set = None
    engine_id: str = ""
    benchmark: bool = False
    benchmark_data: BenchmarkData = None

    def __init__(self):
        # Per-instance mutable state (fixes cross-instance leakage when
        # several Engines exist, e.g. inside a MultiEngineNetwork).
        self.result_set = {}
        self.pending_unassigned_task = {}
        self.returned_requests = {}
        self.waiting_tokens = []

    @staticmethod
    def _from_socket(socket: "Socket", benchmark: bool) -> "Engine":
        # Shared tail of the three public factory methods below.
        engine: Engine = Engine()
        engine.socket = socket
        engine.engine_id = str(uuid4())
        engine.benchmark = benchmark
        if benchmark:
            engine.benchmark_data = BenchmarkData()
        return engine

    @staticmethod
    def create_connect_to_local(unix_path: str = "/etc/russel/russel.sock", benchmark: bool = False) -> "Engine":
        """Create an Engine connected over a unix domain socket."""
        return Engine._from_socket(Socket.create_ipc(unix_path), benchmark)

    @staticmethod
    def create_connect_to_network(host: str = "127.0.0.1", port: int = 8321, benchmark: bool = False) -> "Engine":
        """Create an Engine connected over TCP."""
        return Engine._from_socket(Socket.create_network(host, port), benchmark)

    @staticmethod
    def create_from_uri(uri: str, benchmark: bool = False) -> "Engine":
        """Create an Engine from a URI understood by Socket.create_from_uri."""
        return Engine._from_socket(Socket.create_from_uri(uri), benchmark)

    def upload_all_local_routines(self):
        """Upload the bundled standard routines to the engine."""
        self.upload_routine(MatrixScalarProd)
        self.upload_routine(MatrixSum)
        self.upload_routine(MatrixVectorMulti)

    def start(self):
        """Start the background receiving loop (idempotent)."""
        if not self.running:
            start_new_thread(self.receiving_loop, ())
            self.running = True

    def kill(self):
        """Stop the receiving loop and close the socket."""
        self.running = False
        self.socket.close()

    def force_schedule(self):
        """Ask the work scheduler to schedule pending work immediately."""
        data: dict = {"id": russell_python_interface.static_vars.work_scheduler_id, "command": "api", "data": {}}
        data["data"]["command"] = "force_schedule"
        self.socket.send_json(data)

    def run_task(
            self, name: str, data: List[float], required_vars: List[int]
    ) -> str:  # TODO: later also numpy input
        """Create a task from a routine file and submit it; returns its token."""
        task: Task = Task.create_from_file(name, data)
        task.required_vars = required_vars
        return self.run_prepared_task(task)

    def run_template_task(self, template: StandardRoutine, data: List[float]) -> str:
        """Create a task from a routine template and submit it; returns its token."""
        task: Task = Task.create_from_template(template, data)
        task.required_vars = template.return_vars
        return self.run_prepared_task(task)

    def run_prepared_task(self, task: Task) -> str:
        """Submit an already-built Task or TaskSetTask; returns its token."""
        data: dict = {
            "id": russell_python_interface.static_vars.work_scheduler_id,
            "command": "api",
            "data": task.serialize(self.engine_id),
            "token": str(uuid4()),
        }
        if type(task) is Task:
            data["data"]["command"] = "register_new_task"
        elif type(task) is TaskSetTask:
            data["data"]["command"] = "task_from_set"
        self.pending_unassigned_task[data["token"]] = task
        self.socket.send_json(data)
        return data["token"]

    def upload_routine(self, template: StandardRoutine):
        """Upload one routine template to the routine manager."""
        data: dict = {
            "command": "api",
            "id": russell_python_interface.static_vars.routine_manager_id,
            "data": Routine.create_from_template(template).serialize(),
            "token": str(uuid4()),
        }
        data["data"]["command"] = "save_and_add"
        if self.benchmark:
            self.benchmark_data.track_payload_size(len(str(data)), False)
            self.benchmark_data.track(data["token"], False)
        self.socket.send_json(data)

    def task_done(self, token: str) -> bool:
        """True once the task registered under *token* has been solved."""
        return self.pending_unassigned_task[token].solved

    def resend_task(self, token: str) -> None:
        """Re-submit a previously registered task (e.g. after a timeout)."""
        self.run_prepared_task(self.pending_unassigned_task[token])

    def make_request(self, data: dict, wait: bool = False) -> str:
        """Send *data* with a fresh token and return the token.

        With wait=True, blocks (at most 10 s) until the reply shows up
        in returned_requests; fetch it afterwards via get_request().
        """
        uuid: str = str(uuid4())
        data["token"] = uuid
        # Register the token so receiving_loop keeps the reply; without
        # this the reply was discarded and wait=True always timed out.
        self.waiting_tokens.append(uuid)
        if self.benchmark:
            self.benchmark_data.track_payload_size(len(str(data)), False)
            self.benchmark_data.track(data["token"], False)
        self.socket.send_json(data)
        if wait:
            timeout: int = int(time.time())
            # Wait up to 10 seconds, then give up and return anyway.
            while uuid not in self.returned_requests.keys() and time.time() - timeout < 10:
                time.sleep(0.05)
        return uuid

    def get_task(self, token: str) -> Task:
        """Return the (possibly still pending) task registered under *token*."""
        return self.pending_unassigned_task[token]

    def get_request(self, token: str) -> dict:
        """Pop and return the reply stored for *token*, or None if absent."""
        if token in self.returned_requests:
            data: dict = self.returned_requests[token]
            del self.returned_requests[token]
            return data
        return None

    def receiving_loop(self):
        """Background loop dispatching incoming messages.

        Replies whose token is registered in waiting_tokens are kept in
        returned_requests; "task_return" messages fill in the matching
        task's result vectors and mark it done.
        """
        while self.running:
            message: dict = self.socket.receive()
            if message is None:
                continue
            if "token" in message.keys() and message["token"] in self.waiting_tokens:
                self.returned_requests[message["token"]] = message
                if self.benchmark:
                    self.benchmark_data.track(message["token"], False)
                self.waiting_tokens.remove(message["token"])
            if "command" in message and message["command"] == "task_return":
                # NOTE(review): raises KeyError for an unknown token —
                # verify the server never sends unsolicited task_returns.
                task: Task = self.pending_unassigned_task[message["token"]]
                token: str = message["token"]
                del message["command"]
                del message["token"]
                for key in message.keys():  # Sets data so it can be accessed
                    task.encode_data(int(key), message[key])
                task.set_done()
                if self.benchmark:
                    self.benchmark_data.track(token, True)
                if self.task_respond_func is not None:
                    self.task_respond_func(token, task)
                if self.delete_from_set is not None:
                    if token in self.delete_from_set:
                        del self.delete_from_set[token]
            if "command" in message and message["command"] == "task_return_error":
                raise RuntimeError("Routine Failed")

    def set_task_handler(self, task_handler):
        """Register a callback invoked as task_handler(token, task)."""
        self.task_respond_func = task_handler

    def register_task_set(self, task_set: TaskSet):
        """Register *task_set* with the scheduler and block until it is
        assigned an id (stored in task_set.my_task_id[engine_id])."""
        token: str = str(uuid4())
        data: dict = {
            "id": russell_python_interface.static_vars.work_scheduler_id,
            "command": "api",
            "token": token,
            "data": task_set.serialize(),
        }
        data["data"]["command"] = "create_task_set"
        self.waiting_tokens.append(token)
        if self.benchmark:
            self.benchmark_data.track_payload_size(len(str(data)), False)
            self.benchmark_data.track(data["token"], False)
        self.socket.send_json(data)
        while token not in self.returned_requests.keys():
            print("Waiting for return id")  # debug output (kept)
            time.sleep(0.05)
        print(self.returned_requests)  # debug output (kept)
        return_data: dict = self.get_request(token)
        print(return_data)  # debug output (kept)
        task_set.my_task_id[self.engine_id] = return_data["task_set_id"]
        task_set.usable = True

    def send_task_set_task(self, task: TaskSetTask) -> str:
        """Submit one task belonging to a registered task set; returns its token."""
        data: dict = {
            "id": russell_python_interface.static_vars.work_scheduler_id,
            "command": "api",
            "data": task.serialize(self.engine_id),
            "token": str(uuid4()),
        }
        data["data"]["command"] = "task_from_set"
        if self.benchmark:
            self.benchmark_data.track_payload_size(len(str(data)), False)
            self.benchmark_data.track(data["token"], False)
        self.pending_unassigned_task[data["token"]] = task
        self.socket.send_json(data)
        return data["token"]

    def delete_task_set(self, task_set: TaskSet):
        """Remove *task_set* from the scheduler."""
        task_set_id: int = task_set.my_task_id[self.engine_id]  # local renamed from `id` (shadowed builtin)
        data: dict = {
            "command": "api",
            "id": russell_python_interface.static_vars.work_scheduler_id,
            "data": {"command": "remove_task_set", "task_set_id": task_set_id},
        }
        if self.benchmark:
            self.benchmark_data.track_payload_size(len(str(data)), False)
        self.socket.send_json(data)

    def set_delete_from_set(self, set):
        """Install the shared dict from which solved task tokens are removed."""
        self.delete_from_set = set
import time
from typing import Any, Dict, Tuple, Callable
# This file generates Statistics
class BenchmarkData:
    """Collects response-time and payload-size samples and aggregates
    them into simple statistics (averages, variances, min/max).

    Samples are keyed by their offset from start_time; each value is a
    (measurement, is_task) tuple.
    """
    average_response_time: float = 0
    average_response_time_tasks: float = 0
    variance_in_response_time: float = 0
    variance_in_task_response_time: float = 0
    max_response_time: float = 0
    min_response_time: float = 0
    task_count: int = 0
    response_times: Dict[float, Tuple[float, bool]] = {}
    average_payload_size: float = 0
    average_task_payload_size: float = 0
    variance_in_payload_sizes: float = 0
    variance_in_task_payload_sizes: float = 0
    max_payload_size: float = 0
    min_payload_size: float = 0
    payload_sum: float = 0
    time_id_tracker: Dict[Any, float] = {}
    payload_size_tracker: Dict[float, Tuple[float, bool]] = {}
    start_time: float = None

    def __init__(self):
        # Fresh per-instance containers: as plain class attributes the
        # three dicts above would be shared by every BenchmarkData
        # instance (mutable class-attribute pitfall).
        self.response_times = {}
        self.time_id_tracker = {}
        self.payload_size_tracker = {}
        self.start_time = time.time()

    def track(self, some_id, typ: bool = True) -> None:
        """Start or stop a timer for *some_id*.

        The first call stores the current time; the second records the
        elapsed time and, when *typ* is True, counts it as a task.
        """
        if some_id in self.time_id_tracker:
            self.response_times[time.time() - self.start_time] = (time.time() - self.time_id_tracker[some_id], typ)
            self.task_count += 1 if typ else 0
            del self.time_id_tracker[some_id]
        else:
            self.time_id_tracker[some_id] = time.time()

    def track_payload_size(self, payload_size: float, typ: bool = True):
        """Record one payload size (typ=True marks it as a task payload)."""
        self.payload_size_tracker[time.time() - self.start_time] = (payload_size, typ)
        self.payload_sum += payload_size

    def finish_benchmark_response_time(self):
        """Aggregate all recorded samples into the summary attributes."""
        if len(self.response_times) != 0 and self.task_count != 0:
            self.calculate_average_response_time()
            self.calculate_variances_response_time()
        if len(self.payload_size_tracker) != 0 and self.task_count != 0:
            self.calculate_averages_payload_size()
            self.calculate_variances_payload_size()
        self.find_specific_intervals_borders()

    @staticmethod
    def calculate_average(data: Dict[float, Tuple[float, bool]], size_1: int, size_2: int) -> Tuple[float, float]:
        """Return (average over all samples, average over task samples)."""
        sum_1: float = 0
        sum_2: float = 0
        for key, value in data.items():
            sum_1 += value[0]
            if value[1]:
                sum_2 += value[0]
        return sum_1 / size_1, sum_2 / size_2

    @staticmethod
    def calculate_variance(
            data: Dict[float, Tuple[float, bool]], info_1: Tuple[float, int], info_2: Tuple[float, int]
    ) -> Tuple[float, float]:
        """Return (variance over all samples, variance over task samples).

        info_1/info_2 are (mean, sample count) pairs.
        """
        sum_1: float = 0
        sum_2: float = 0
        for key, value in data.items():
            sum_1 += pow(info_1[0] - value[0], 2)
            if value[1]:
                sum_2 += pow(info_2[0] - value[0], 2)
        return sum_1 / info_1[1], sum_2 / info_2[1]

    @staticmethod
    def find_intervals(data: Dict[float, Tuple[float, bool]], search_obj: bool, typ: bool = None) -> float:
        """Return the min (search_obj=True) or max (search_obj=False) sample.

        NOTE(review): the typ filter tests value[0] (the measurement)
        rather than value[1] (the task flag), and both filtered branches
        are identical; typ is never passed by callers — verify intent.
        """
        method: Callable = min if search_obj else max
        searched_value: float = 1000000 if search_obj else -1000000
        for key, value in data.items():
            if typ is None:
                searched_value = method(searched_value, value[0])
            elif typ and value[0]:
                searched_value = method(searched_value, value[0])
            else:
                searched_value = method(searched_value, value[0])
        return searched_value

    def find_specific_intervals_borders(self):
        """Fill the min/max summary attributes from the raw samples."""
        self.max_response_time = BenchmarkData.find_intervals(self.response_times, False)  # max search
        self.min_response_time = BenchmarkData.find_intervals(self.response_times, True)  # min search
        self.max_payload_size = BenchmarkData.find_intervals(self.payload_size_tracker, False)
        self.min_payload_size = BenchmarkData.find_intervals(self.payload_size_tracker, True)

    def calculate_average_response_time(self) -> None:
        """Compute overall and task-only average response times."""
        self.average_response_time, self.average_response_time_tasks = self.calculate_average(
            self.response_times, len(self.response_times), self.task_count
        )

    def calculate_variances_response_time(self) -> None:
        """Compute overall and task-only response-time variances."""
        self.variance_in_response_time, self.variance_in_task_response_time = self.calculate_variance(
            self.response_times,
            (self.average_response_time, len(self.response_times)), (self.average_response_time_tasks, self.task_count),
        )

    def calculate_averages_payload_size(self):
        """Compute overall and task-only average payload sizes."""
        self.average_payload_size, self.average_task_payload_size = BenchmarkData.calculate_average(
            self.payload_size_tracker, len(self.payload_size_tracker), self.task_count
        )

    def calculate_variances_payload_size(self) -> None:
        """Compute overall and task-only payload-size variances."""
        self.variance_in_payload_sizes, self.variance_in_task_payload_sizes = BenchmarkData.calculate_variance(
            self.payload_size_tracker,
            (self.average_payload_size, len(self.payload_size_tracker)),
            (self.average_task_payload_size, self.task_count),
        )

    def __str__(self):
        """Multi-line textual report of all aggregated statistics."""
        time_needed: float = max(list(self.response_times.keys()) + [1])
        return_string: str = "\n========================== Internal Benchmarks ==========================" \
                             + "\nTasks Tracked: " + str(self.task_count) \
                             + "\n==============================================" \
                             + "\nAverage Response Time: " + str(self.average_response_time) \
                             + "\nAverage Task Response Time: " + str(self.average_response_time_tasks) \
                             + "\nVariance in Response Times: " + str(self.variance_in_response_time) \
                             + "\nVariance in Task Response Times: " + str(self.variance_in_task_response_time) \
                             + "\nMax Response Time: " + str(self.max_response_time) \
                             + "\nMin Response Time: " + str(self.min_response_time) \
                             + "\n==============================================" \
                             + "\nAverage Payload Size: " + str(self.average_payload_size) \
                             + "\nAverage Task Payload Size: " + str(self.average_task_payload_size) \
                             + "\nVariance in Payload Sizes: " + str(self.variance_in_payload_sizes) \
                             + "\nVariance in Task Payload Sizes: " + str(self.variance_in_task_payload_sizes) \
                             + "\nMax Payload Size: " + str(self.max_payload_size) \
                             + "\nMin Payload Size: " + str(self.min_payload_size) \
                             + "\n==============================================" \
                             + "\nTotal Time: " + str(time_needed) \
                             + "\nTasks per Second: " + str(self.task_count / time_needed) \
                             + "\nPayload per Second: " + str(self.payload_sum / time_needed)
        return return_string
import copy
import time
from typing import List, Dict
from russel_python_interface.engine import Engine
from russel_python_interface.task_sets import TaskSetTask, TaskSet
class MultiEngineNetwork:
    """Fans task-set work out over several Engines, always handing the
    next task to the least recently used engine."""
    # Class-level defaults kept for compatibility; per-instance copies
    # are created in __init__ (otherwise every network shares them).
    engines: Dict[float, Engine] = {}
    persistent: List[Engine] = []

    def __init__(self):
        self.engines = {}  # keyed by last-use timestamp
        self.persistent = []  # stable ordering, indexed by schedule results

    @staticmethod
    def create(endpoints: List[List[str]]) -> "MultiEngineNetwork":
        """Build a network from (address, kind) pairs.

        kind "IPC" connects via unix socket, "PUB" via URI; any other
        kind is skipped (as before).
        """
        network: MultiEngineNetwork = MultiEngineNetwork()
        for endpoint in endpoints:
            if endpoint[1] == "IPC":
                e: Engine = Engine.create_connect_to_local(endpoint[0])
            elif endpoint[1] == "PUB":
                e = Engine.create_from_uri(endpoint[0])
            else:
                continue
            e.start()
            e.upload_all_local_routines()
            # min() over the timestamp keys yields the least recently used
            network.engines[time.time()] = e
            network.persistent.append(e)
        return network

    def schedule_task_from_task_set(self, task: TaskSetTask) -> (str, int):
        """Send *task* to the least recently used engine.

        Returns (token, index of the engine in self.persistent).
        """
        last_update: float = min(self.engines.keys())
        e: Engine = self.engines[last_update]
        token: str = e.send_task_set_task(task)
        current_time: float = time.time()
        # re-key the engine under the new timestamp (moves it to the back)
        del self.engines[last_update]
        self.engines[current_time] = e
        return token, self.persistent.index(e)

    def send_task_set(self, task_set: TaskSet):
        """Register *task_set* with every engine."""
        for engine in self.engines.values():
            engine.register_task_set(task_set)

    def solve_task_batch(self, tasks: List[TaskSetTask]):
        """Dispatch *tasks* and keep resending unanswered ones until the
        engines' receiving loops have cleared them all."""
        expecting_task: Dict[str, int] = {}
        for engine in self.engines.values():
            engine.set_delete_from_set(expecting_task)
        for task in tasks:
            token, index = self.schedule_task_from_task_set(task)
            print(index, token)  # debug output (kept)
            expecting_task[token] = index
        for engine in self.engines.values():
            engine.force_schedule()
        while len(expecting_task) > 0:
            # snapshot: receiver threads delete entries concurrently
            temp_tokens = copy.deepcopy(expecting_task)
            for task_token in temp_tokens:
                if task_token in expecting_task:
                    self.persistent[expecting_task[task_token]].resend_task(task_token)
            for engine in self.engines.values():
                engine.force_schedule()
            time.sleep(0.2)

    def delete_task_set(self, task_set: TaskSet):
        """Remove *task_set* from every engine."""
        for engine in self.engines.values():
            engine.delete_task_set(task_set)
from pytz import timezone
from tzlocal import get_localzone
# Default deep-learning environment used when the user does not pick one.
DEFAULT_ENV = "keras"
# NOTE(review): named PST but set to Asia/Shanghai — presumably the
# service's home timezone; confirm the name is intentional.
PST_TIMEZONE = timezone("Asia/Shanghai")
LOCAL_TIMEZONE = get_localzone()
# Default ignore patterns applied when uploading project code.
DEFAULT_FLOYD_IGNORE_LIST = """
# Directories to ignore when uploading code to floyd
# Do not add a trailing slash for directories
.*
.git
.eggs
eggs
lib
lib64
parts
sdist
var
.russell
"""
# Environments accepted by the CLI (framework[:py2] variants).
ENV_LIST = ["tensorflow", "tensorflow:py2", "tensorflow-1.0", "tensorflow-1.0:py2", "theano", "theano:py2", "keras",
            "keras:py2", "caffe", "caffe:py2", "torch", "torch:py2","pytorch","pytorch:py2","chainer","chainer:py2","mxnet:py2","kur"]
# The commented-out list below is the extended/versioned environment set;
# kept for reference.
# ENV_LIST = ['tensorflow-1.4', 'keras', 'pytorch-0.3', 'pytorch', 'pytorch-0.3:py2', 'keras:py2', 'default',
#             'tensorflow:py2', 'tensorflow-1.4:py2', 'pytorch:py2', 'tensorflow', 'tensorflow-1.4',
#             'tensorflow-0.12:py2', 'tensorflow-1.1', 'tensorflow-1.0', 'tensorflow-1.3', 'tensorflow-1.2',
#             'pytorch-0.3', 'pytorch-0.2', 'pytorch-0.1', 'chainer-2.0:py2', 'tensorflow-1.0:py2', 'tensorflow',
#             'theano:py2', 'keras', 'torch:py2', 'tensorflow-1.3:py2', 'tensorflow-1.4:py2', 'pytorch-0.3:py2',
#             'chainer-2.0', 'tensorflow-1.1:py2', 'default', 'theano-0.9:py2', 'chainer-1.23:py2', 'torch',
#             'pytorch-0.1:py2', 'theano-0.8:py2', 'tensorflow-1.2:py2', 'keras:py2', 'chainer', 'chainer-1.23', 'kur',
#             'mxnet:py2', 'caffe', 'pytorch:py2', 'theano-0.8', 'theano-0.9', 'chainer:py2', 'pytorch',
#             'pytorch-0.2:py2', 'tensorflow-0.12', 'tensorflow:py2', 'caffe:py2', 'theano', 'tensorflow-1.4',
#             'tensorflow-0.12:py2',
#             'tensorflow-1.1', 'tensorflow-1.0', 'tensorflow-1.3', 'tensorflow-1.2', 'pytorch-0.3', 'pytorch-0.2',
#             'pytorch-0.1', 'chainer-2.0:py2', 'tensorflow-1.0:py2', 'tensorflow', 'theano:py2', 'keras', 'torch:py2',
#             'tensorflow-1.3:py2', 'tensorflow-1.4:py2', 'pytorch-0.3:py2', 'chainer-2.0', 'tensorflow-1.1:py2',
#             'default', 'theano-0.9:py2', 'chainer-1.23:py2', 'torch', 'pytorch-0.1:py2', 'theano-0.8:py2',
#             'tensorflow-1.2:py2', 'keras:py2', 'chainer', 'chainer-1.23', 'kur', 'mxnet:py2', 'caffe', 'pytorch:py2',
#             'theano-0.8', 'theano-0.9', 'chainer:py2', 'pytorch', 'pytorch-0.2:py2', 'tensorflow-0.12',
#             'tensorflow:py2', 'caffe:py2', 'theano']
# Instance-type identifiers sent to the backend; CPU is the default.
CPU_INSTANCE_TYPE = "cpu_high"
GPU_INSTANCE_TYPE = "gpu_high"
DEFAULT_INSTANCE_TYPE = CPU_INSTANCE_TYPE
# Getting-started text shown to new users.
FIRST_STEPS_DOC = """
Start by cloning the sample project
git clone https://github.com/russellcloud/tensorflow-examples.git
cd tensorflow-examples
And _create a russell project inside that.
russell _create --project example-proj
"""
# SimCity4 Loading messages
# https://www.gamefaqs.com/pc/561176-simcity-4/faqs/22135
# Credits: EA Games
# Shown at random while the CLI waits on long-running operations.
LOADING_MESSAGES = [
    "Adding Hidden Layers",
    "Adjusting Bell Curves",
    "Aesthesizing Industrial Grade Containers",
    "Aligning Covariance Matrices",
    "Applying Feng Shui Backprops",
    "Applying Theano Soda Layer",
    "Asserting Packed Exemplars",
    "Attempting to Lock Back-Buffer",
    "Binding Sampling Root System",
    "Breeding Neural Nets",
    "Building Deep Data Trees",
    "Bureacritizing Deep Learning Bureaucracies",
    "Calculating Inverse Probability Matrices",
    "Calculating SGD Expectoration Trajectory",
    "Calibrating Blue Skies",
    "Charging Ozone Layer",
    "Coalescing Cloud Formations",
    "Cohorting Exemplars",
    "Collecting Meteor Particles",
    "Compounding Inert Tessellations",
    "Compressing Fish Files",
    "Computing Optimal Bin Packing",
    "Concatenating Sub-Contractors",
    "Containing Existential Buffer",
    "Debarking Ark Ramp",
    "Debunching Unionized Commercial Services",
    "Deciding What Message to Display Next",
    "Decomposing Singular Values",
    "Decrementing Tectonic Plates",
    "Deleting Ferry Routes",
    "Depixelating Inner Mountain Surface Back Faces",
    "Depositing Slush Funds",
    "Destabilizing Economic Indicators",
    "Determining Width of Blast Fronts",
    "Deunionizing Bulldozers",
    "Dicing Trained Models",
    "Diluting Livestock Nutrition Variables",
    "Downloading Satellite Terrain Data",
    "Exposing Flash Variables to Streak System",
    "Extracting Gradient Resources",
    "Factoring Pay Scale",
    "Fixing Election Outcome Matrix",
    "Flood-Filling Ground Water",
    "Flushing Pipe Network",
    "Gathering Particle Sources",
    "Generating Scheduled Jobs",
    "Gesticulating Mimes",
    "Graphing Container Migration",
    "Hiding Willio Webnet Mask",
    "Implementing Impeachment Routine",
    "Increasing Accuracy of RCI Simulators",
    "Increasing Neural Magmafacation",
    "Initializing My Sim Tracking Mechanism",
    "Initializing CNN Timetable",
    "Initializing Robotic Click-Path AI",
    "Inserting Sublimated Messages",
    "Integrating Multidimensional Curves",
    "Integrating Illumination Form Factors",
    "Integrating Population Graphs",
    "Iterating Cellular Automata",
    "Lecturing Errant Subsystems",
    "Mixing Dropouts in Genetic Pool",
    "Modeling Object Components",
    "Mopping Occupant Leaks",
    "Normalizing Power",
    "Obfuscating Quigley Matrix",
    "Overconstraining Dirty Industry Calculations",
    "Partitioning City Grid Singularities",
    "Perturbing Matrices",
    "Pixalating Overfitting Patches",
    "Polishing Water Highlights",
    "Populating Lot Templates",
    "Preparing Sprites for Random Walks",
    "Prioritizing Landmarks",
    "Projecting Law Enforcement Pastry Intake",
    "Realigning Alternate Time Frames",
    "Reconfiguring User Mental Processes",
    "Relaxing Splines",
    "Removing Road Network Speed Bumps",
    "Removing Texture Gradients",
    "Removing Vehicle Avoidance Behavior",
    "Resolving GUID Conflict",
    "Reticulating Splines",
    "Retracting Phong Shader",
    "Retrieving from Back Store",
    "Reverse Engineering Image Consultant",
    "Routing Neural Network Infanstructure",
    "Scrubbing Terrain",
    "Searching for Llamas",
    "Seeding Architecture Simulation Parameters",
    "Sequencing Particles",
    "Setting Advisor Moods",
    "Setting Inner Deity Indicators",
    "Setting Universal Physical Constants",
    "Sonically Enhancing Occupant-Free Timber",
    "Speculating Stock Market Indices",
    "Splatting Transforms",
    "Stratifying Ground Layers",
    "Sub-Sampling Water Data",
    "Synthesizing Gravity",
    "Synthesizing Wavelets",
    "Time-Compressing Simulator Clock",
    "Unable to Reveal Current Activity",
    "Weathering Buildings",
    "Zeroing Crime Network"
]
from marshmallow import Schema, fields, post_load
from russell.cli.utils import sizeof_fmt
from russell.model.base import BaseModel
from pytz import utc
from russell.constants import LOCAL_TIMEZONE
from russell.date_utils import pretty_date
class DataDetailsSchema(Schema):
    """Marshmallow schema for DataDetails payloads."""
    state = fields.Str()
    size = fields.Str()
    uri = fields.Str()
    @post_load
    def make_data_details(self, data_details):
        """Build a DataDetails model from the loaded dict."""
        return DataDetails(**data_details)
class DataDetails(BaseModel):
    """Model describing an uploaded datum: its state, size and URI."""
    schema = DataDetailsSchema(strict=True)
    def __init__(self,
                 state,
                 size,
                 uri):
        self.state = state
        self.size = size
        self.uri = uri
class ModuleSchema(Schema):
    """Marshmallow schema for Module payloads received from the API."""
    name = fields.Str()
    id = fields.Str()
    description = fields.Str(allow_none=True)
    module_type = fields.Str(allow_none=True)
    family_id = fields.Str(allow_none=True)
    entity_id = fields.Str(allow_none=True)
    version = fields.Int(allow_none=True)
    # API sends "date_created"; mapped onto the model's `created` field.
    created = fields.DateTime(load_from="date_created")
    size = fields.Int(allow_none=True)
    state = fields.Str(allow_none=True)
    codehash = fields.Str(allow_none=True)
    enable_tensorboard = fields.Boolean(default=False)
    @post_load
    def make_module(self, data):
        """Build a Module model from the loaded dict."""
        return Module(**data)
class Module(BaseModel):
    """Model of an uploaded module (code or data) with display helpers."""
    schema = ModuleSchema(strict=True)
    def __init__(self,
                 name,
                 description=None,
                 id=None,
                 module_type="code",
                 family_id=None,
                 entity_id=None,
                 version=None,
                 created=None,
                 size=0,
                 uri=None,
                 state=None,
                 codehash=None,
                 enable_tensorboard=None):
        self.id = id
        self.name = name
        self.description = description
        self.module_type = module_type
        self.family_id = family_id
        self.version = version
        self.entity_id = entity_id
        # size is in MB (see size_pretty below)
        self.size = size
        self.state = state
        # created is normalized to the local timezone on construction
        self.created = self.localize_date(created)
        self.uri = uri
        self.enable_tensorboard = enable_tensorboard
        self.codehash = codehash
    def localize_date(self, date):
        """Attach UTC to a naive datetime, then convert to LOCAL_TIMEZONE."""
        if not date:
            return None
        if not date.tzinfo:
            date = utc.localize(date)
        return date.astimezone(LOCAL_TIMEZONE)
    @property
    def created_pretty(self):
        """Human-friendly relative creation date (e.g. "2 days ago")."""
        return pretty_date(self.created)
    @property
    def size_pretty(self):
        """Human-friendly size string; sub-MB sizes are rounded down."""
        if self.size < 1:
            return "less than 1 MB"
        #self.size is MB to B
        return sizeof_fmt(self.size * 1024 * 1024)
class ModuleRequestSchema(Schema):
    """Marshmallow schema for ModuleRequest upload payloads."""
    name = fields.Str()
    description = fields.Str()
    module_type = fields.Str()
    entity_id = fields.Str()
    data_type = fields.Str()
    version = fields.Integer(allow_none=True)
    size = fields.Int(allow_none=True)
    nopack = fields.Boolean()
    codehash = fields.Str()
    @post_load
    def make_data(self, data):
        """Build a ModuleRequest model from the loaded dict."""
        return ModuleRequest(**data)
class ModuleRequest(BaseModel):
    """Model for a module-upload request sent to the API."""
    schema = ModuleRequestSchema(strict=True)
    def __init__(self,
                 name,
                 entity_id=None,
                 description=None,
                 module_type="code",
                 version=None,
                 data_type=None,
                 size=None,
                 nopack=False,
                 codehash=None):
        self.name = name
        self.description = description
        self.module_type = module_type
        self.version = version
        self.entity_id = entity_id
        self.data_type = data_type
        self.size = size
        # nopack=True uploads files as-is instead of packing them
        self.nopack = nopack
        self.codehash = codehash
import math
import re
# Russian number words used by RussianCost.cost_digits_str.
# The "penny"/"thousand" sub-tables hold the feminine forms of 1-3
# (used for kopeks and thousands); the remaining integer keys are the
# default (masculine) forms for units, teens, tens and hundreds.
DIGIT_DICT = {
    "penny": {
        1: "одна",
        2: "две",
        3: "три",
    },
    "thousand": {
        1: "одна",
        2: "две",
        3: "три",
    },
    1: "один",
    2: "два",
    3: "три",
    4: "четыре",
    5: "пять",
    6: "шесть",
    7: "семь",
    8: "восемь",
    9: "девять",
    10: "десять",
    11: "одиннадцать",
    12: "двенадцать",
    13: "тринадцать",
    14: "четырнадцать",
    15: "пятнадцать",
    16: "шестнадцать",
    17: "семнадцать",
    18: "восемнадцать",
    19: "девятнадцать",
    20: "двадцать",
    30: "тридцать",
    40: "сорок",
    50: "пятьдесят",
    60: "шестьдесят",
    70: "семьдесят",
    80: "восемьдесят",
    90: "девяносто",
    100: "сто",
    200: "двести",
    300: "триста",
    400: "четыреста",
    500: "пятьсот",
    600: "шестьсот",
    700: "семьсот",
    800: "восемьсот",
    900: "девятьсот",
}
class Cost:
    """Data descriptor that normalizes a monetary value.

    Accepts str/int/float and stores the value rounded to two decimal
    places in the owner instance's __dict__ under *name*. A string that
    cannot be parsed raises ValueError; any other type is stored as 0.0
    (unchanged legacy behavior).
    """
    def __init__(self, name):
        self.name = name
    def __get__(self, instance, owner):
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        normalized = 0.0
        if isinstance(value, str):
            try:
                normalized = float(value)
            except ValueError:
                raise ValueError(
                    "Проверьте правильность переменной: {}".format(value),
                )
        elif isinstance(value, (int, float)):
            normalized = float(value)
        instance.__dict__[self.name] = round(normalized, 2)
class CostFormant:
    """Data descriptor validating a cost output-format string.

    The format may contain the directives %s/%S (sign), %r/%R (rubles)
    and %p/%P (kopeks); any other character after '%' raises ValueError.
    A bare trailing '%' is accepted. Non-string values are rejected.
    """
    def __init__(self, name):
        self.name = name
    def __get__(self, instance, owner):
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        if not isinstance(value, str):
            raise ValueError(
                "Формат вывода стоимости должен передаваться в виде строки"
            )
        allowed = ("s", "S", "r", "R", "p", "P")
        pos, size = 0, len(value)
        while pos < size:
            char = value[pos]
            pos += 1
            if char == "%" and pos < size:
                char = value[pos]
                pos += 1
                if char not in allowed:
                    raise ValueError(
                        "Формат вывода стоимости не поддерживает: {}".format(char)
                    )
        instance.__dict__[self.name] = value
class RussianCost:
    """Convert a monetary amount into its Russian words representation.

    ``coast`` (sic — "cost" is meant; names kept for API compatibility)
    is normalized by the Cost descriptor; ``out_format`` is validated by
    CostFormant and interpreted by strfcoast: %s/%S sign, %r/%R rubles,
    %p/%P kopeks — lowercase numeric, uppercase spelled out in words.
    """
    # Descriptor stores the normalized float under instance key "coast";
    # the `coast` property below wraps it and derives rubles/kopeks.
    _coast = Cost("coast")
    out_format = CostFormant("out_format")
    def __init__(self, cost, out_format="%S %R %P"):
        self._coast = None  # runs Cost.__set__, initializing to 0.0
        self._rubles = None
        self._penny = None
        self.coast = cost  # property setter: normalize + split into parts
        self.out_format = out_format
    def __str__(self):
        # collapse the double spaces left by empty format components
        return re.sub(" +", " ", self.strfcoast(self.out_format).strip())
    def __repr__(self):
        return re.sub(" +", " ", self.strfcoast(self.out_format).strip())
    def _get_rubles_str(self, rubles, index=1, rubles_str=str(), unit=True):
        """Recursively spell out *rubles*, two digit-groups at a time.

        Alternates between unit=True (tens+units pair plus the unit word
        for the current power of ten) and unit=False (hundreds digit).
        """
        if not rubles:
            return re.sub(" +", " ", rubles_str.strip())
        if unit:
            ten = int(rubles % 100)
            # teens (11-18 here) are a single word; other values split
            # into tens + units
            rubles_str = {
                True: lambda x: "{} {} {}".format(
                    self.cost_digits_str(x, index),
                    self.unit_str(index, x),
                    rubles_str,
                ),
                False: lambda x: "{} {} {} {}".format(
                    self.cost_digits_str(x - x % 10, index) if x - x % 10
                    else str(),
                    self.cost_digits_str(x % 10, index) if x % 10 else str(),
                    self.unit_str(index, x),
                    rubles_str,
                ),
            }[10 < ten < 19](ten)
            index, rubles, unit = index * 100, rubles // 100, False
        else:
            hundreds = int((rubles % 10) * 100)
            rubles_str = "{} {}".format(
                self.cost_digits_str(hundreds, index) if hundreds else str(),
                rubles_str,
            )
            index, rubles, unit = index * 10, rubles // 10, True
        return self._get_rubles_str(rubles, index, rubles_str, unit)
    def _get_sign(self, is_string=True) -> str:
        # NOTE(review): coast == 0 falls into the negative branch and
        # renders "минус"/"-" — verify that zero handling is intended.
        return {
            True: lambda x: str() if x else "минус",
            False: lambda x: str() if x else "-",
        }[is_string](True if self.coast > 0 else False)
    def _get_penny(self, is_string=True) -> str:
        """Kopek part: spelled out (is_string) or numeric, with unit word."""
        if not self._penny:
            return str()
        unit = self.unit_str(0, self._penny)
        if 11 <= self._penny <= 19:
            return {
                True: "{} {}".format(
                    self.cost_digits_str(self._penny, 0),
                    unit,
                ),
                False: "{} {}".format(self._penny, unit),
            }[is_string]
        units, tens = self._penny % 10, self._penny // 10
        return {
            True: "{} {} {}".format(
                self.cost_digits_str(tens * 10, 0) if tens else str(),
                self.cost_digits_str(units, 0) if units else str(),
                unit,
            ),
            False: "{} {}".format(self._penny, unit),
        }[is_string]
    def _get_rubles(self, is_string=True) -> str:
        """Ruble part: spelled out (is_string) or numeric with unit word."""
        if not self._rubles:
            return str()
        if not is_string:
            unit = self.unit_str(1, self._rubles)
            return "{} {}".format(self._rubles, unit)
        return self._get_rubles_str(self._rubles)
    @property
    def coast(self):
        # reads through the Cost descriptor (normalized value)
        return self._coast
    @coast.setter
    def coast(self, value):
        # normalize via the descriptor, then split into rubles/kopeks
        self._coast = value
        penny, rubles = math.modf(math.fabs(self._coast))
        self._rubles = int(rubles)
        self._penny = int(round(penny, 2) * 100)
    @staticmethod
    def unit_str(index, value):
        """Unit word for the power-of-ten *index*, grammatically agreed
        with *value* (Russian plural rules: 1 / 2-4 / 5-20 forms)."""
        limit = value % 10
        return {
            0: "копеек"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("копейка" if limit == 1 else "копейки"),
            1: "рублей"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("рубль" if limit == 1 else "рубля"),
            1000: "тысяч"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("тысяча" if limit == 1 else "тысячи"),
            pow(10, 6): "миллионов"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("миллион" if limit == 1 else "миллиона"),
            pow(10, 9): "миллиардов"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("миллиард" if limit == 1 else "миллиарда"),
            pow(10, 12): "триллионов"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("триллион" if limit == 1 else "триллиона"),
            pow(10, 15): "квадриллионов"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("квадриллион" if limit == 1 else "квадриллиона"),
            pow(10, 18): "квинтиллионов"
            if 11 <= value <= 20 or not limit or 5 <= limit <= 9
            else ("квинтиллион" if limit == 1 else "квинтиллиона"),
        }.get(index)
    @staticmethod
    def cost_digits_str(cost, index):
        """Digit word for *cost*; kopeks (index 0) and thousands (1000)
        use the feminine sub-tables of DIGIT_DICT for 1-3."""
        cost_digits_str = None
        if index == 0:
            cost_digits_str = DIGIT_DICT["penny"].get(cost)
        if index == 1000:
            cost_digits_str = DIGIT_DICT["thousand"].get(cost)
        if not cost_digits_str:
            cost_digits_str = DIGIT_DICT.get(cost)
        return cost_digits_str
    def strfcoast(self, fmc: str) -> str:
        """Interpret the format string *fmc* (same %-directives as
        CostFormant validates) and return the rendered cost."""
        data = []
        i, n = 0, len(fmc)
        while i < n:
            ch, i = fmc[i], i + 1
            if ch == "%" and i < n:
                ch, i = fmc[i], i + 1
                if ch == "s":
                    data.append(self._get_sign(is_string=False))
                elif ch == "S":
                    data.append(self._get_sign(is_string=True))
                elif ch == "r":
                    data.append(self._get_rubles(is_string=False))
                elif ch == "R":
                    data.append(self._get_rubles(is_string=True))
                elif ch == "p":
                    data.append(self._get_penny(is_string=False))
                elif ch == "P":
                    data.append(self._get_penny(is_string=True))
            else:
                data.append(ch)
        return "".join(data)
import random
from russian_names.utils import transliterate_word
class RussianNames:
_data = []
__slots__ = (
'name', 'name_reduction', 'name_max_len', 'patronymic',
'patronymic_reduction', 'patronymic_max_len', 'surname',
'surname_reduction', 'surname_max_len', 'count', 'gender',
'transliterate', 'output_type', 'seed', 'rare', 'uppercase',
'_base',
)
    def __init__(self, **kwargs):
        """Configure the generator; unknown options fall back to defaults."""
        prop_defaults = {
            'name': True,
            'name_reduction': False,
            'name_max_len': 10,
            'patronymic': True,
            'patronymic_reduction': False,
            'patronymic_max_len': 10,
            'surname': True,
            'surname_reduction': False,
            'surname_max_len': 10,
            'count': 10,
            'gender': 0.5,
            'transliterate': False,
            'output_type': 'str',
            'seed': None,
            'rare': False,
            'uppercase': False,
        }
        # NOTE(review): seed is popped from kwargs, so the loop below
        # always stores self.seed = None even when a seed was supplied —
        # the RNG is still seeded; verify this is intended.
        seed = kwargs.pop('seed', None)
        if seed is not None:
            random.seed(seed)
        for prop, default in prop_defaults.items():
            setattr(self, prop, kwargs.get(prop, default))
        # build the filtered name base with the configured *_max_len limits
        self._fill_base()
def __str__(self):
info = '{} settings:\n'.format(self.__class__.__name__)
for option in self.__slots__:
if option.startswith('_'):
continue
value = getattr(self, option)
info += '\t {}: {}\n '.format(option, value)
return info
    def __len__(self):
        # NOTE(review): __iter__ decrements self.count, so len() shrinks
        # as the generator is consumed.
        return self.count
    def __iter__(self):
        """Yield up to ``count`` random persons.

        Consuming the iterator permanently decrements ``count``, so the
        instance can only be iterated once in full.
        """
        while self.count:
            yield self.get_person()
            self.count -= 1
    @classmethod
    def read_data(cls, data):
        """Install the shared name data set (10 space-separated sections,
        consumed by _fill_base) on the class."""
        cls._data = data
def _set_options(self, **kwargs):
refill_base = False
for prop, value in kwargs.items():
if prop.endswith('_len'):
refill_base = True
setattr(self, prop, value)
if refill_base:
self._fill_base()
def _load_set(self, section, max_len):
return list(filter(lambda x: len(x) <= max_len, section.split(' ')))
def _fill_base(self):
self._base = {}
names_m_r = self._data[0]
names_m = self._data[1]
patronymics_m_r = self._data[2]
patronymics_m = self._data[3]
surnames_m = self._data[4]
names_w_r = self._data[5]
names_w = self._data[6]
patronymics_w_r = self._data[7]
patronymics_w = self._data[8]
surnames_w = self._data[9]
if self.rare:
names_m += names_m_r
patronymics_m += patronymics_m_r
names_w += names_w_r
patronymics_w += patronymics_w_r
self._base = {
'man': {
'name': self._load_set(names_m, self.name_max_len),
'patronymic': self._load_set(patronymics_m, self.patronymic_max_len),
'surname': self._load_set(surnames_m, self.surname_max_len),
},
'woman': {
'name': self._load_set(names_w, self.name_max_len),
'patronymic': self._load_set(patronymics_w, self.patronymic_max_len),
'surname': self._load_set(surnames_w, self.surname_max_len),
},
}
def _select_gender_distribution(self):
dice = random.uniform(0, 1)
gender = 0.5
if 0 <= self.gender <= 1:
gender = self.gender
return dice < gender
def _get_object(self, gender, elem_type, reduction=False):
sub = 'man' if gender else 'woman'
base = self._base[sub][elem_type]
name = random.choice(base)
if reduction:
name = name[0] + '.'
if self.transliterate:
name = transliterate_word(name)
if self.uppercase:
name = name.upper()
return name
def _format_person(self, person):
if self.output_type == 'dict':
result = person
elif self.output_type == 'list':
result = list(person.values())
elif self.output_type == 'tuple':
result = tuple(person.values())
elif self.output_type == 'str':
result = ' '.join([v for k, v in person.items() if v is not None])
else:
raise ValueError("Output_type does not have value 'str', 'list, 'tuple' or 'dict'. ")
return result
def get_person(self, **kwargs):
self._set_options(**kwargs)
gender = self._select_gender_distribution()
name = self._get_object(gender, 'name', self.name_reduction)
patronymic = self._get_object(gender, 'patronymic', self.patronymic_reduction)
surname = self._get_object(gender, 'surname', self.surname_reduction)
person = {
'name': name if self.name else None,
'patronymic': patronymic if self.patronymic else None,
'surname': surname if self.surname else None,
}
return self._format_person(person)
def get_batch(self, **kwargs):
batch = ()
for _ in range(self.count):
fio = self.get_person(**kwargs)
batch += (fio, )
return batch
# Public API of this module.
__all__ = ("RussianNames",)
import zipfile
from os.path import abspath, join, dirname
from russian_names.consts import TRANSLITERATE_TABLE, SUFFIXES, PATRONYMIC_RULES
def transliterate_word(word):
    """Transliterate *word* via TRANSLITERATE_TABLE.

    Characters missing from the table are passed through unchanged.
    """
    return ''.join(TRANSLITERATE_TABLE.get(char, char) for char in word)
def check_suffix(word, suffixes=SUFFIXES):
    """Return *word* if it ends with any of *suffixes*, otherwise None."""
    matched = next(
        (s for s in suffixes if word[-len(s):] == s),
        None,
    )
    if matched is not None:
        return word
def read_file(path_in, encoding='cp1251', length=None):
    """Read a text file and return at most *length* lines (all when None).

    :param path_in: path of the file to read
    :param encoding: text encoding (defaults to cp1251, the data files' encoding)
    :param length: optional cap on the number of returned lines
    :return: list of lines without newline characters

    Fix: the original implementation opened the file without ever closing
    it; the handle is now released deterministically via a context manager.
    """
    with open(path_in, 'r', encoding=encoding) as f:
        return f.read().splitlines()[:length]
def save_file(path_out, words, encoding='cp1251', sorting=False):
    """Write *words* to *path_out*, one word per line.

    When *sorting* is true the list is sorted in place before writing.
    """
    if sorting:
        words.sort()
    with open(path_out, "w", encoding=encoding) as out_file:
        out_file.write("\n".join(words))
def patronymic_from_name(name, gender='male'):
    """Derive a patronymic from a male first name.

    :param name: string — the source (male) first name
    :param gender: string — 'male' yields the male patronymic suffix,
        any other value yields the female one
    :return: string, or None when no rule matches the name's final letter
    """
    last_char = name[-1]
    # PATRONYMIC_RULES maps a string of candidate final letters to a
    # (cut_last_letter, male_suffix, female_suffix) tuple.
    for rule, suffix in PATRONYMIC_RULES.items():
        cut, m_suffix, w_suffix = suffix
        for char in rule:
            if last_char == char:
                if gender == 'male':
                    suffix = m_suffix
                else:
                    suffix = w_suffix
                # Some rules replace the final letter, others just append.
                return name[:-1] + suffix if cut else name + suffix
def create_patronymics(names):
    """Build the set of patronymics derived from a list of male names.

    Names for which no patronymic rule applies are skipped.
    :param names: list of male first names
    :return: set of patronymics
    """
    return set(filter(None, map(patronymic_from_name, names)))
_BASE_PATH = '_data.zip'
_BASE_NAME = 'base.txt'
def load_data():
    """Load the bundled name database from the packaged zip archive.

    :return: list of lines (the ten word sections) decoded as UTF-8

    Fix: the original left the ZipFile open; it is now closed via a
    context manager.
    """
    path = abspath(join(dirname(__file__), _BASE_PATH))
    with zipfile.ZipFile(path) as data_zip:
        data = data_zip.read(_BASE_NAME)
    return data.decode('utf8').splitlines()
# Public API of this module.
__all__ = (
    'load_data',
    'read_file',
    'save_file',
    'check_suffix',
    'transliterate_word',
)
from __future__ import unicode_literals
import re
from decimal import Decimal
from six import ensure_text, string_types
class NumberToWords(object):
    """
    Convert an integer or decimal number into Russian cardinal numerals,
    without attaching any unit of measurement.
    """
    # Unit words for the integer part:
    # (grammatical gender 1..3, singular, paucal (2-4), plural).
    words0 = (1, '', '', '')
    # Unit words for the fractional part; the extra 5th element is the
    # number of decimal digits to keep.
    words1 = (1, '', '', '', 3)
    def __init__(self, words_map=None):
        if words_map:
            self.words0, self.words1 = words_map
        # Sanity checks for subclasses overriding words0/words1.
        assert isinstance(self.words0[0], int)
        assert isinstance(self.words0[1], string_types)
        assert isinstance(self.words0[2], string_types)
        assert isinstance(self.words0[3], string_types)
        assert isinstance(self.words1[0], int)
        assert isinstance(self.words1[1], string_types)
        assert isinstance(self.words1[2], string_types)
        assert isinstance(self.words1[3], string_types)
        assert isinstance(self.words1[4], int)
        # Digits 1-9; gendered digits are tuples (masculine, feminine, neuter).
        self.numbers0 = (
            '',
            ('один', 'одна', 'одно'),
            ('два', 'две', 'два'),
            'три',
            'четыре',
            'пять',
            'шесть',
            'семь',
            'восемь',
            'девять',
        )
        # Teens 10-19 (used when the tens digit is '1').
        self.numbers11 = (
            'десять',
            'одиннадцать',
            'двенадцать',
            'тринадцать',
            'четырнадцать',
            'пятнадцать',
            'шестнадцать',
            'семнадцать',
            'восемнадцать',
            'девятнадцать',
        )
        # Tens 10-90.
        self.numbers10 = [
            '',
            'десять',
            'двадцать',
            'тридцать',
            'сорок',
            'пятьдесят',
            'шестьдесят',
            'семьдесят',
            'восемьдесят',
            'девяносто',
        ]
        # Hundreds 100-900.
        self.numbers100 = [
            '',
            'сто',
            'двести',
            'триста',
            'четыреста',
            'пятьсот',
            'шестьсот',
            'семьсот',
            'восемьсот',
            'девятьсот',
        ]
        # Scale words per 3-digit group: [gender, singular, paucal, plural].
        self.DIGITAL_WORDS = [
            [2, 'тысяча', 'тысячи', 'тысяч'],
            [1, 'миллион', 'миллиона', 'миллионов'],
            [1, 'миллиард', 'миллиарда', 'миллиардов'],
            [1, 'триллион', 'триллиона', 'триллионов'],
            [1, 'квадриллион', 'квадриллиона', 'квадриллионов'],
            [1, 'квинтиллион', 'квинтиллиона', 'квинтиллионов'],
            [1, 'секстиллион', 'секстиллиона', 'секстиллионов'],
            [1, 'септиллион', 'септиллиона', 'септиллионов'],
            [1, 'октиллион', 'октиллиона', 'октиллионов'],
            [1, 'нониллион', 'нониллиона', 'нониллионов'],
            [1, 'дециллион', 'дециллиона', 'дециллионов'],
        ]
    def morph(self, value, words):
        """Pick the Russian plural form of *words* matching numeric string *value*."""
        # value = '121343', words = [2, 'штука', 'штуки', 'штук']
        if not value:
            return
        if len(value) < 3:
            value = ('000%s' % value)[-3:]
        pre = int(value[-2:-1])
        last = int(value[-1])
        if last < 1 or pre == 1:
            # ...0 and the teens 11-19 take the plural form.
            return words[3]
        elif last < 2:
            # ...1 (but not 11) takes the singular form.
            return words[1]
        elif last < 5:
            # ...2-4 (but not 12-14) take the paucal form.
            return words[2]
        return words[3]
    def parse(self, value, words):
        """Spell out the non-negative integer *value* using unit *words*."""
        s = '%d' % int(value)
        # Left-pad so the length is a multiple of 3, then split into triples
        # (the zip-over-one-iterator trick groups consecutive digits).
        s = '0' * (3 - len(s) % 3) + s
        triples = [''.join(x) for x in zip(*[iter(s)] * 3)]
        result = []
        length = len(triples)
        # Scale words indexed from the lowest triple upward: the given unit
        # words first, then thousands, millions, ...
        all_words = [words] + self.DIGITAL_WORDS
        for i, part in enumerate(triples):
            if part == '000':
                continue
            words_row = all_words[length - i - 1]
            x = self.numbers100[int(part[0])]
            if x:
                result.append(x)
            if part[1] == '1':
                # 10-19 are a single word; the units digit selects it.
                result.append(self.numbers11[int(part[2])])
            else:
                if part[1] != '0':
                    result.append(self.numbers10[int(part[1])])
                x = self.numbers0[int(part[2])]
                if x and not isinstance(x, string_types):
                    # Gendered digit (1 or 2): choose the form for this scale.
                    x = x[words_row[0] - 1]
                result.append(x)
            result.append(self.morph(part, words_row))
        return ' '.join(result)
    def prepare(self, value):
        """Return the full verbal form of *value* (int, float, Decimal or str)."""
        if isinstance(value, (Decimal, int)):
            value = str(value)
            if 'E' in value:
                raise ValueError(
                    'Decimal value contains the Exponent: %s' % value
                )
        elif isinstance(value, float):
            # Because of float precision in Python:
            # '%f' % 999999999999999.9 => '999999999999999.875000'
            # '%f' % 999999999999999.99 => '1000000000000000.000000'
            if value > 999999999999999.9:
                raise ValueError(
                    'Float value must be less than 999999999999999.9 '
                    'or use Decimal value.'
                )
            value = '%f' % value
        strings = ensure_text(value).split('.')[:2]
        if len(strings) == 1:
            strings.append('')
        elif strings[1]:
            # Pad/truncate the fractional part to the configured precision.
            count = self.words1[4]
            strings[1] = (strings[1] + ('0' * count))[:count]
        result = [self.parse(strings[0], self.words0)]
        if strings[1]:
            result.append(self.parse(strings[1], self.words1))
        return re.sub(r'\s+', ' ', ' '.join(result)).strip()
class NumberToRoubles(NumberToWords):
    """
    Convert an integer or decimal number into Russian cardinal numerals
    with roubles and kopecks as the units of measurement.
    """
    words0 = (1, 'рубль', 'рубля', 'рублей')
    words1 = (2, 'копейка', 'копейки', 'копеек', 2)
class NumberToTons(NumberToWords):
    """
    Convert an integer or decimal number into Russian cardinal numerals
    with tonnes and kilograms as the units of measurement.
    """
    words0 = (2, 'тонна', 'тонны', 'тонн')
    words1 = (1, 'килограмм', 'килограмма', 'килограммов', 3)
class NumberToKilograms(NumberToWords):
    """
    Convert an integer or decimal number into Russian cardinal numerals
    with kilograms and grams as the units of measurement.
    """
    words0 = (1, 'килограмм', 'килограмма', 'килограммов')
    words1 = (1, 'грамм', 'грамма', 'граммов', 3)
class TextToNumbers(object):
    """
    Replace Russian cardinal numerals found in free text with digits.

    ``prepare`` splits the text into alternating runs of ordinary words and
    numeral words, resolves each numeral run into an integer, and re-joins
    the text with the numbers inlined.

    Fix: removed a duplicate ``'два'`` key in ``numbers0`` (the literal
    appeared twice; the second silently overwrote the first).
    """
    def __init__(self):
        # Units (0-9): after these either a scale word ("тысяча", ...)
        # follows, or the numeral ends.
        self.numbers0 = {
            'ноль': 0,
            'один': 1, 'одна': 1, 'одно': 1,
            'два': 2, 'две': 2,
            'три': 3,
            'четыре': 4,
            'пять': 5,
            'шесть': 6,
            'семь': 7,
            'восемь': 8,
            'девять': 9,
        }
        # Teens (10-19): terminal in the same way as units.
        self.numbers1 = {
            'десять': 10,
            'одиннадцать': 11,
            'двенадцать': 12,
            'тринадцать': 13,
            'четырнадцать': 14,
            'пятнадцать': 15,
            'шестнадцать': 16,
            'семнадцать': 17,
            'восемнадцать': 18,
            'девятнадцать': 19,
        }
        # Tens (20-90): may be followed by a unit or a scale word.
        self.numbers2 = {
            'двадцать': 20,
            'тридцать': 30,
            'сорок': 40,
            'пятьдесят': 50,
            'шестьдесят': 60,
            'семьдесят': 70,
            'восемьдесят': 80,
            'девяносто': 90,
        }
        # Hundreds (100-900).
        self.numbers3 = {
            'сто': 100,
            'двести': 200,
            'триста': 300,
            'четыреста': 400,
            'пятьсот': 500,
            'шестьсот': 600,
            'семьсот': 700,
            'восемьсот': 800,
            'девятьсот': 900,
        }
        # Scale words mapped to their power of ten.
        self.power_numbers = {
            'тысяча': 3, 'тысячи': 3, 'тысяч': 3,
            'миллион': 6, 'миллиона': 6, 'миллионов': 6,
            'миллиард': 9, 'миллиарда': 9, 'миллиардов': 9,
            'триллион': 12, 'триллиона': 12, 'триллионов': 12,
            'квадриллион': 15, 'квадриллиона': 15, 'квадриллионов': 15,
            'квинтиллион': 18, 'квинтиллиона': 18, 'квинтиллионов': 18,
            'секстиллион': 21, 'секстиллиона': 21, 'секстиллионов': 21,
            'септиллион': 24, 'септиллиона': 24, 'септиллионов': 24,
            'октиллион': 27, 'октиллиона': 27, 'октиллионов': 27,
            'нониллион': 30, 'нониллиона': 30, 'нониллионов': 30,
            'дециллион': 33, 'дециллиона': 33, 'дециллионов': 33,
        }
        # Union of every recognised numeral word.
        self.all_words = {}
        self.all_words.update(self.numbers0)
        self.all_words.update(self.numbers1)
        self.all_words.update(self.numbers2)
        self.all_words.update(self.numbers3)
        self.all_words.update(self.power_numbers)
    def prepare(self, value):
        """Return *value* with every numeral word group replaced by digits."""
        if not isinstance(value, string_types):
            value = str(value)
        value = ensure_text(value)
        source_words = [w for w in value.split(' ') if w]
        sections = (self.numbers0, self.numbers1, self.numbers2, self.numbers3,
                    self.power_numbers)
        # Phase 1: split the words into groups, where each group is either a
        # run of ordinary (non-numeral) words or the numeral words belonging
        # to one single number.
        groups = []
        group = None
        for_digit = False
        for word in source_words:
            _word = word.lower()
            is_digit = _word in self.all_words
            if not group or for_digit != is_digit:
                # First word, or switching between text and numeral mode.
                group = [_word if is_digit else word]
                for_digit = is_digit
                groups.append([group, for_digit])
                continue
            elif not is_digit:
                group.append(word)
                continue
            prev = group[-1]
            # Two numeral words from the same section start a new number.
            found = False
            for section in sections:
                if prev in section and _word in section:
                    group = [_word]
                    groups.append([group, for_digit])
                    found = True
                    break
            if found:
                continue
            # A unit or teen not followed by a scale word ends the number.
            end_digit = prev in self.numbers0 or prev in self.numbers1
            if end_digit and _word not in self.power_numbers:
                group = [_word]
                groups.append([group, for_digit])
                continue
            # Tens followed by hundreds also start a new number.
            if prev in self.numbers2 and _word in self.numbers3:
                group = [_word]
                groups.append([group, for_digit])
                continue
            # Otherwise the word belongs to the current numeral.
            group.append(_word)
        # Phase 2: resolve each numeral group into an integer.
        result = []
        for group, for_digit in groups:
            if not for_digit:
                result.extend(group)
                continue
            total = 0
            # A leading scale word ("тысяча" ...) implies an implicit 1.
            temp = 1 if group[0] in self.power_numbers else 0
            for number in group:
                if number in self.power_numbers:
                    total += temp * (10 ** self.power_numbers[number])
                    temp = 0
                else:
                    temp += self.all_words[number]
            total += temp
            result.append('%d' % total)
        return re.sub(r'\s+', ' ', ' '.join(result)).strip()
class TextToPhone(TextToNumbers):
    """
    Convert cardinal numerals found in the text into a phone number.
    """
    def prepare(self, value):
        """Return the digit groups (and an optional leading '+') as one string."""
        normalized = super(TextToPhone, self).prepare(value)
        parts = []
        for index, token in enumerate(normalized.split(' ')):
            if index == 0 and token == 'плюс':
                parts.append('+')
            elif token.isdigit():
                parts.append(token)
            else:
                # Stop at the first non-numeric token after the number.
                break
        return ''.join(parts)
from difflib import SequenceMatcher
from sentence_transformers import util
from collections import defaultdict
from scipy.spatial import distance
import logging
import nltk
logger = logging.getLogger(__name__)
def range_by_allcs(sentences, sent, smodel, threshold=0.7):
    """Rank candidate paraphrases via paraphrase mining over all pairs.

    *sent* is appended to the candidate pool; every mined pair that involves
    it with a score in (threshold, 1.0) contributes a hypothesis.  When
    fewer than two such pairs exist, the single best-scoring mined pair is
    used as a fallback.

    Fix: the fallback compared an *index* (``paraphrases[0][1]``) against the
    origin string, which is always unequal, so it could return the origin
    sentence itself; it now compares the indexed sentence.

    :param sentences: list of candidate sentences
    :param sent: origin sentence
    :param smodel: sentence-transformers model
    :param threshold: minimal similarity score to accept a pair
    :return: list of hypothesis sentences
    """
    sentences = sentences + [sent]
    hypothesis = []
    # paraphrase_mining returns [score, i, j] triples sorted by score
    # (descending), where i/j index into ``sentences``.
    paraphrases = util.paraphrase_mining(smodel, sentences)
    good_hyp = set()
    for paraphrase in paraphrases:
        score, i, j = paraphrase
        if threshold < score < 1.00:
            if sentences[i] == sent:
                good_hyp.add(sentences[j])
            elif sentences[j] == sent:
                good_hyp.add(sentences[i])
    if len(good_hyp) > 1:
        hypothesis.extend(good_hyp)
    else:
        # Fallback: take whichever side of the best-scoring pair is not
        # the origin sentence.
        # NOTE(review): this raises IndexError when mining finds no pairs
        # at all — confirm callers always pass at least two sentences.
        best = paraphrases[0]
        if sentences[best[1]] != sent:
            hypothesis.append(sentences[best[1]])
        else:
            hypothesis.append(sentences[best[2]])
    return hypothesis
def range_by_cs(sentences, sent, smodel, threshold=0.9):
    """Rank candidates by cosine similarity of their embeddings to *sent*.

    Candidates that contain the origin verbatim, or are near-duplicates of
    it (SequenceMatcher ratio >= 0.95), or score below *threshold*, are
    dropped.  Returned hypotheses are ordered by ascending score.
    If embedding fails for any reason, falls back to returning the deduped
    non-near-duplicate candidates unranked (best effort by design).
    """
    try:
        sentence_embeddings = smodel.encode(sentences)
        origin_emb = smodel.encode([sent])
        best_cands = []
        for sentence, embedding in zip(sentences, sentence_embeddings):
            if sent not in sentence:
                if SequenceMatcher(None, sent, sentence).ratio() < 0.95:
                    # Cosine similarity: 1 - cosine distance.
                    score = 1 - distance.cosine(embedding, origin_emb)
                    if score >= threshold:
                        if score != 1.0:
                            if [score, sentence] not in best_cands:
                                best_cands.append([score, sentence])
        # Sort by score (ascending) and strip the scores.
        hypothesis = sorted(best_cands)
        hypothesis = list([val for [_, val] in hypothesis])
    except Exception as e:
        # Deliberate best-effort fallback: keep candidates unranked.
        logger.warning("Can't measure embeddings scores. Error: " + str(e))
        cands = []
        for sentence in sentences:
            if sent not in sentence:
                if SequenceMatcher(None, sent, sentence).ratio() < 0.95:
                    cands.append(sentence)
        hypothesis = list(set(cands))
    return hypothesis
def range_candidates(sentences, sent, smodel, top_k=4, threshold=0.9, strategy="cs"):
    """Deduplicate and rank candidates, returning at most *top_k* of them.

    :param sentences: candidate sentences
    :param sent: origin sentence reference
    :param smodel: sentence-transformers model
    :param top_k: maximum number of hypotheses to return
    :param threshold: cosine-similarity threshold passed to the ranker
    :param strategy: "cs" ranks against the origin sentence only;
        anything else ranks over all generated pairs ("all_cs" behaviour)
    :return: list of the top_k best candidates
    """
    unique_sentences = list(set(sentences))
    ranker = range_by_cs if strategy == "cs" else range_by_allcs
    ranked = ranker(unique_sentences, sent, smodel, threshold=threshold)
    return ranked[:top_k]
def get_scores(ngeval, best_candidates, sentence):
    """Average each metric produced by *ngeval* over all candidates.

    :param ngeval: evaluator exposing compute_individual_metrics(refs, hyp)
    :param best_candidates: hypothesis sentences (may be empty/None)
    :param sentence: reference sentence
    :return: dict mapping metric name to its mean over the candidates
        (empty when there are no candidates)
    """
    collected = defaultdict(list)
    for hyp in best_candidates or []:
        per_hyp = ngeval.compute_individual_metrics([sentence.lower()], hyp.lower())
        for key, value in per_hyp.items():
            collected[key].append(value)
    return {key: sum(values) / len(values) for key, values in collected.items()}
def check_input(sentence):
    """Validate the input text and split it into sentences.

    :return: (warning, sentences) — warning is None when the input is a
        single, reasonably long sentence.
    """
    sentences = nltk.sent_tokenize(sentence)
    warning = None
    if len(sentence) <= 7:
        warning = "Your sentence is too short. The results can be strange."
    if len(sentences) > 1:
        warning = "There are more than one sentence! We split it and paraphrase separately."
    return warning, sentences
import torch
from typing import Dict
from transformers import MT5ForConditionalGeneration, AutoTokenizer
from russian_paraphrasers.utils import set_seed
from russian_paraphrasers.paraphrasers import Paraphraser
from russian_paraphrasers.candidates_filter_metrics import (
get_scores,
range_candidates,
check_input,
)
import logging
class Mt5Paraphraser(Paraphraser):
    """Paraphraser backed by an mT5 sequence-to-sequence model."""
    def __init__(
        self,
        model_name: str = "mt5-large",
        range_cand: bool = False,
        make_eval: bool = False,
        tokenizer_path: str = "default",
        pretrained_path: str = "default"
    ):
        """
        The class for Mt5 hugging_face interface.
        :param model_name: "mt5-small" or "mt5-base" or "mt5-large". Will call models
        :param range_cand: True/False. Range candidates
        :param make_eval: True/False. Make or not average evaluation for n samples.
        :param tokenizer_path: "default" or some model name in hugging_face format
        :param pretrained_path: "default" or some model name in hugging_face format
        """
        super().__init__(model_name, range_cand, make_eval, tokenizer_path, pretrained_path)
        self.logger = logging.getLogger(__name__)
        # "default" resolves to the published fine-tuned checkpoints,
        # e.g. "mt5-large" -> "alenusch/mt5large-ruparaphraser".
        if tokenizer_path == "default":
            tokenizer_path = "alenusch/{}-ruparaphraser".format(
                model_name.replace("-", "")
            )
        if pretrained_path == "default":
            pretrained_path = "alenusch/{}-ruparaphraser".format(
                model_name.replace("-", "")
            )
        self.tokenizer_path = tokenizer_path
        self.pretrained_path = pretrained_path
        self.load()
    def load(self):
        """Load tokenizer and weights; move the model to GPU when available."""
        set_seed(42)
        _model = MT5ForConditionalGeneration.from_pretrained(self.pretrained_path)
        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = _model.to(self.device)
        self.logger.info(
            "Pretrained file and tokenizer for model {} were loaded.".format(
                self.model_name
            )
        )
    def generate(
        self,
        sentence: str,
        n: int = 10,
        temperature: float = 1.0,
        top_k: int = 10,
        top_p: float = 0.95,
        max_length: int = 150,
        repetition_penalty: float = 1.5,
        threshold: float = 0.8,
        strategy: str = "cs"
    ) -> Dict:
        """
        Generate paraphrase. You can set parameters
        :param sentence: str: obligatory one sentence
        :param n: number of sequences to generate
        :param temperature: temperature
        :param top_k: top_k
        :param top_p: top_p
        :param max_length: max_length
        :param repetition_penalty: repetition_penalty
        :param threshold: param for cosine similarity range
        :param strategy: param for range strategy
        :return: dict with fields
            obligatory: origin, predictions;
            optional: warning, best_candidates, average_metrics
        """
        result = {"origin": sentence, "results": []}
        # Multi-sentence input is split and paraphrased sentence by sentence.
        warning, sentences = check_input(sentence)
        if warning:
            result["warning"] = warning
        for sentence in sentences:
            final_outputs = []
            # Task prefix the model was fine-tuned with ("paraphrase:" in
            # Russian), terminated by an explicit </s> token.
            lsentence = "перефразируй: " + sentence + "</s>"
            # NOTE(review): pad_to_max_length is deprecated in newer
            # transformers releases (use padding=) — confirm the pinned
            # transformers version still accepts it.
            encoding = self.tokenizer.encode_plus(
                lsentence, pad_to_max_length=True, return_tensors="pt"
            )
            input_ids, attention_masks = (
                encoding["input_ids"].to(self.device),
                encoding["attention_mask"].to(self.device),
            )
            beam_outputs = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_masks,
                do_sample=True,
                max_length=max_length,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                early_stopping=True,
                num_return_sequences=n,
                repetition_penalty=repetition_penalty,
            )
            for beam_output in beam_outputs:
                sent = self.tokenizer.decode(
                    beam_output,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=True,
                )
                # Keep only outputs that differ from the input and are not
                # exact duplicates of one another.
                if sent.lower() != sentence.lower() and sent not in final_outputs:
                    final_outputs.append(sent)
            sentence_res = {"predictions": final_outputs}
            best_candidates = []
            if self.range_cand:
                best_candidates = range_candidates(
                    final_outputs, sentence, self.smodel,
                    threshold=threshold, strategy=strategy
                )
                sentence_res["best_candidates"] = best_candidates
            if self.make_eval:
                # When ranking was skipped (or returned nothing), evaluate
                # all raw predictions instead.
                if not best_candidates:
                    best_candidates = final_outputs
                metrics = get_scores(self.ngeval, best_candidates, sentence)
                sentence_res["average_metrics"] = metrics
            result["results"].append(sentence_res)
        return result
import logging
from abc import abstractmethod
from sentence_transformers import SentenceTransformer, util
from nlgeval import NLGEval
from typing import Any
import torch
import nltk
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
class Paraphraser:
    """Abstract base class for all paraphrasers.

    Subclasses must implement :meth:`load` and :meth:`generate`.
    Optionally loads a sentence-transformers ranker (``range_cand``) and an
    NLGEval evaluator (``make_eval``) shared by all concrete paraphrasers.

    Fix: the abstract methods raised ``NotImplemented`` (a constant, not an
    exception — raising it produces a confusing TypeError); they now raise
    ``NotImplementedError``.
    """
    def __init__(
        self,
        model_name: str = "gpt2",
        range_cand: bool = False,
        make_eval: bool = False,
        tokenizer_path: str = "default",
        pretrained_path: str = "default"
    ) -> None:
        """
        Possible models: mt5-large, mt5-base, mt5-small, gpt2, gpt3
        :param model_name: name of the underlying model
        :param range_cand: when True, load a sentence-transformers model
            used to rank generated candidates
        :param make_eval: when True, load NLGEval to score candidates
        :param tokenizer_path: "default" or a hugging_face model name
        :param pretrained_path: "default" or a hugging_face model name
        """
        self.logger = logging.getLogger(__name__)
        self.tokenizer_path = tokenizer_path
        self.pretrained_path = pretrained_path
        self.make_eval = make_eval
        self.range_cand = range_cand
        # Subclasses switch this to CUDA in load() when available.
        self.device = torch.device("cpu")
        if self.range_cand:
            self.smodel = SentenceTransformer("paraphrase-xlm-r-multilingual-v1")
        if self.make_eval:
            # Omit the slow / embedding-based metrics.
            self.ngeval = NLGEval(
                metrics_to_omit=[
                    "EmbeddingAverageCosineSimilairty",
                    "CIDEr",
                    "METEOR",
                    "SkipThoughtCS",
                    "VectorExtremaCosineSimilarity",
                    "GreedyMatchingScore",
                ]
            )
        self.model_name = model_name
        self._check_model(model_name)
    def _check_model(self, model_name: str) -> bool:
        """Validate *model_name*; raise ValueError for unknown models."""
        __models_dict = ["mt5-large", "mt5-base", "mt5-small", "gpt2", "gpt3"]
        if model_name not in __models_dict:
            self.logger.error(
                "There is no such a model for paraphraser! Use one of these: mt5-large, mt5-base, mt5-small, gpt2, gpt3"
            )
            raise ValueError(
                "It looks like you try to call model we do not have or you write the models name wrong."
            )
        return True
    @abstractmethod
    def load(self):
        """Load tokenizer and model weights (implemented by subclasses)."""
        raise NotImplementedError
    @abstractmethod
    def generate(self, *args, **kwargs) -> Any:
        """Generate paraphrases (implemented by subclasses)."""
        raise NotImplementedError
from typing import Dict
from russian_paraphrasers.paraphrasers import Paraphraser
from russian_paraphrasers.candidates_filter_metrics import (
get_scores,
range_candidates,
check_input,
)
from russian_paraphrasers.utils import clean
from transformers import (
GPT2LMHeadModel,
GPT2Tokenizer,
)
import logging
import torch
class GPTParaphraser(Paraphraser):
    """Paraphraser backed by a GPT-2/GPT-3-style causal language model."""
    def __init__(
        self,
        model_name: str = "gpt2",
        range_cand: bool = False,
        make_eval: bool = False,
        tokenizer_path: str = "default",
        pretrained_path: str = "default"
    ):
        """
        The class for GPT2 hugging_face interface.
        :param model_name: "gpt3" or "gpt2". Will call models
        :param range_cand: True/False. Range candidates
        :param make_eval: True/False. Make or not average evaluation for n samples.
        :param tokenizer_path: "default" or some model name in hugging_face format
        :param pretrained_path: "default" or some model name in hugging_face format
        """
        super().__init__(model_name, range_cand, make_eval, tokenizer_path, pretrained_path)
        self.logger = logging.getLogger(__name__)
        # "default" resolves to the published fine-tuned checkpoints,
        # e.g. "gpt2" -> "alenusch/rugpt2-paraphraser".
        if tokenizer_path == "default":
            tokenizer_path = "alenusch/ru{}-paraphraser".format(model_name)
        if pretrained_path == "default":
            pretrained_path = "alenusch/ru{}-paraphraser".format(model_name)
        self.tokenizer_path = tokenizer_path
        self.pretrained_path = pretrained_path
        self.batch_size = 1
        self.load()
    def load(self):
        """Load tokenizer and weights; move the model to GPU when available."""
        self.tokenizer = GPT2Tokenizer.from_pretrained(self.tokenizer_path)
        model = GPT2LMHeadModel.from_pretrained(self.pretrained_path)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)
        self.logger.info(
            "Pretrained file and tokenizer for model {} were loaded. {}, {}".format(
                self.model_name, self.tokenizer_path, self.pretrained_path
            )
        )
    def generate(
        self,
        sentence: str,
        n: int = 10,
        temperature: float = 1.0,
        top_k: int = 10,
        top_p: float = 0.9,
        max_length: int = 100,
        repetition_penalty: float = 1.5,
        threshold: float = 0.7,
        strategy: str = "cs",
        stop_token: str = "</s>"
    ) -> Dict:
        """
        Generate paraphrase. You can set parameters
        :param sentence: str: obligatory one sentence
        :param n: number of sequences to generate
        :param temperature: temperature
        :param top_k: top_k
        :param top_p: top_p
        :param max_length: max_length (default is -1)
        :param repetition_penalty: repetition_penalty
        :param threshold: param for cosine similarity range
        :param strategy: param for range strategy
        :param stop_token </s> for gpt2s
        :return: dict with fields
            obligatory: origin, predictions;
            optional: warning, best_candidates, average_metrics
        """
        result = {"origin": sentence, "results": []}
        # Multi-sentence input is split and paraphrased sentence by sentence.
        warning, sentences = check_input(sentence)
        if warning:
            result["warning"] = warning
        for sentence in sentences:
            # Prompt format the model was fine-tuned with:
            # "<s>{original} === {paraphrase}</s>".
            my_sentence = "<s>{} === ".format(sentence)
            encoded_prompt = self.tokenizer.encode(
                my_sentence, add_special_tokens=False, return_tensors="pt"
            )
            encoded_prompt = encoded_prompt.to(self.device)
            if encoded_prompt.size()[-1] == 0:
                input_ids = None
            else:
                input_ids = encoded_prompt
            output_sequences = self.model.generate(
                input_ids=input_ids,
                max_length=max_length,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                do_sample=True,
                num_return_sequences=n,
            )
            # Drop a possible extra batch dimension.
            if len(output_sequences.shape) > 2:
                output_sequences.squeeze_()
            generated_sequences = []
            for generated_sequence_idx, generated_sequence in enumerate(
                output_sequences
            ):
                generated_sequence = generated_sequence.tolist()
                text = self.tokenizer.decode(
                    generated_sequence, clean_up_tokenization_spaces=True
                )
                # Truncate at the stop token, then at the first </s> or
                # newline, whichever the decode produced.
                text = text[: text.find(stop_token) if stop_token else None]
                text = text.split("</s>")[0].split("\n")[0]
                # Strip the echoed prompt from the front of the decoding.
                total_sequence = (
                    text[
                        len(
                            self.tokenizer.decode(
                                encoded_prompt[0], clean_up_tokenization_spaces=True
                            )
                        ):
                    ]
                )
                total_sequence = clean(total_sequence)
                generated_sequences.append(total_sequence)
            sentence_res = {"predictions": generated_sequences}
            best_candidates = []
            if self.range_cand:
                best_candidates = range_candidates(
                    generated_sequences, sentence, self.smodel,
                    threshold=threshold, strategy=strategy)
                sentence_res["best_candidates"] = best_candidates
            if self.make_eval:
                # When ranking was skipped (or returned nothing), evaluate
                # all raw predictions instead.
                if not best_candidates:
                    best_candidates = generated_sequences
                metrics = get_scores(self.ngeval, best_candidates, sentence)
                sentence_res["average_metrics"] = metrics
            result["results"].append(sentence_res)
        return result
import re
from collections import Counter
import markdown2
from russian_protowhat.Feedback import Feedback
from russian_protowhat.Test import Test
"""
This file holds the reporter class.
"""
class TestRunner:
    """Runs checks and records ``Test`` instances together with their results."""
    def __init__(self):
        # All Test instances executed so far (plain callables are not kept).
        self.tests = []
    def do_test(self, test):
        """Execute a single check.

        ``test`` is a callable; when it is a ``Test`` instance it is also
        recorded.  Returns a ``(result, feedback)`` tuple where ``feedback``
        is only populated when the test failed.  (Nothing is raised here,
        despite what older docs claimed.)
        """
        result = None
        feedback = None
        test()
        if isinstance(test, Test):
            self.tests.append(test)
            result = test.result
            if not result:
                feedback = test.get_feedback()
        return result, feedback
    def do_tests(self, tests):
        """Run every check in *tests* and collect the per-test results."""
        return [self.do_test(test) for test in tests]
    @property
    def failures(self):
        # Tests whose result is exactly False (None means "not recorded").
        return list(filter(lambda test: test.result is False, self.tests))
    @property
    def has_failed(self):
        return len(self.failures) > 0
class TestRunnerProxy(TestRunner):
    """A TestRunner that records tests locally but delegates execution.

    Checks using this might be split into more atomic checks.
    """
    def __init__(self, runner: TestRunner):
        super().__init__()
        self.runner = runner
    def do_test(self, test):
        """Record ``Test`` instances, then delegate to the wrapped runner."""
        if isinstance(test, Test):
            self.tests.append(test)
        return self.runner.do_test(test)
class Reporter(TestRunnerProxy):
    """Do reporting.

    This class holds the feedback- or success message and tracks whether there are failed tests
    or not. All tests are executed through do_test() in the Reporter.
    """
    # This is the offset for ANTLR (its columns are 0-based; ours are 1-based).
    ast_highlight_offset = {"column_start": 1, "column_end": 1}
    def __init__(self, runner=None, errors=None, highlight_offset=None):
        super().__init__(runner or TestRunner())
        self.fail = False
        self.errors = errors
        self.errors_allowed = False
        self.highlight_offset = highlight_offset or {}
        self.success_msg = "Great work!"
    def get_errors(self):
        return self.errors
    def allow_errors(self):
        """Mark runtime errors as acceptable for the final verdict."""
        self.errors_allowed = True
    def build_failed_payload(self, feedback: Feedback):
        """Build the SCT payload for a failed check, merging highlight offsets."""
        # Counter.update *sums* numeric values, so the reporter-level offset,
        # the feedback's own highlight and the ANTLR offset are added together.
        highlight = Counter()
        code_highlight = feedback.get_highlight_data()
        # 'path' is carried separately: it must not be summed like a number.
        path = code_highlight.get("path", None)
        if path is not None:
            del code_highlight["path"]
        if code_highlight:
            highlight.update(self.highlight_offset)
            if "line_start" in highlight and "line_end" not in highlight:
                highlight["line_end"] = highlight["line_start"]
            highlight.update(code_highlight)
            highlight.update(self.ast_highlight_offset)
        if path is not None:
            highlight["path"] = str(path)
        return {
            "correct": False,
            "message": Reporter.to_html(feedback.message),
            **highlight,
        }
    def build_final_payload(self):
        """Build the final verdict payload (note: `or` binds looser than `and`)."""
        if self.fail or self.errors and not self.errors_allowed:
            # Russian: "Your code raised an error. Fix it and try again!"
            feedback_msg = "Ваш код вызвал ошибку. Исправьте и попробуйте еще раз!"
            return {"correct": False, "message": Reporter.to_html(feedback_msg)}
        else:
            return {"correct": True, "message": Reporter.to_html(self.success_msg)}
    @staticmethod
    def to_html(msg):
        # Render markdown, then strip the outer <p>...</p> wrapper so the
        # message can be embedded inline.
        return re.sub("<p>(.*)</p>", "\\1", markdown2.markdown(msg)).strip()
from typing import TypeVar, Generic, Union, List, Dict, Tuple
from collections import Mapping
from ast import NodeVisitor
import inspect
import importlib
from russian_protowhat.utils_messaging import get_ord
class Selector(NodeVisitor):
    """Collect AST nodes matching a target class, respecting node priorities."""
    def __init__(self, target_cls, target_cls_name=None, strict=True, priority=None, include_head=False):
        self.target_cls = target_cls
        self.target_cls_name = target_cls_name
        self.strict = strict
        self.priority = priority if priority else self._get_node_priority(target_cls)
        self.out = []
        self.include_head = include_head
    def visit(self, node, head=False):
        """Collect matches at this level; only recurse into lower-priority nodes.

        When ``include_head`` is set, the starting node itself may match too.
        """
        if self.is_match(node) and (self.include_head or not head):
            self.out.append(node)
        if head or self.has_priority_over(node):
            return super().visit(node)
    def visit_list(self, lst):
        # Allow the traversal root to be a plain list of nodes.
        for element in lst:
            self.visit(element)
    def is_match(self, node):
        """Return True when *node* matches the configured target."""
        if self.strict:
            # Strict mode: exact class, no subclasses.
            return type(node) is self.target_cls
        name_ok = (
            self.target_cls_name is None
            or self.target_cls_name == node.__class__.__name__
        )
        return isinstance(node, self.target_cls) and name_ok
    def has_priority_over(self, node):
        return self.priority > self._get_node_priority(node)
    def _get_node_priority(self, node):
        # Nodes without an explicit _priority default to 0.
        return getattr(node, "_priority", 0)
T = TypeVar("T")


class DispatcherInterface(Generic[T]):
    """Abstract interface for locating and selecting nodes in a parsed tree."""
    def find(self, name: str, node: T, *args, **kwargs) -> Union[List[T], Dict[str, T]]:
        # todo: document signature, strategy kwarg (depth/breadth first)
        raise NotImplementedError
    def select(self, path: Union[str, Tuple], node: T) -> Union[T, List[T]]:
        raise NotImplementedError
    @staticmethod
    def _path_str_to_list(path):
        """Split a dotted path into steps, coercing numeric steps to int."""
        def _coerce(step):
            try:
                return int(step)
            except ValueError:
                return step
        return [_coerce(step) for step in path.split(".") if step]
    def parse(self, code: str):
        raise NotImplementedError
class Dispatcher(DispatcherInterface):
    """Concrete dispatcher that finds, selects and parses nodes of an AST
    produced by a pluggable AST module (``ast_mod``)."""

    def __init__(self, node_cls, nodes=None, ast_mod=None, safe_parsing=True):
        """Wrapper to instantiate and use a Selector using node names.

        node_cls: base AST node class, used when a name is not in ``nodes``.
        nodes: mapping of node-class names to classes; defaults to
            ``ast_mod.nodes`` when available.
        ast_mod: module expected to provide ``parse`` and, optionally,
            ``nodes``, ``ParseError`` and ``speaker``.
        safe_parsing: when True, ``parse`` returns the ParseError object
            instead of raising it.
        """
        self.node_cls = node_cls
        self.nodes = nodes or getattr(ast_mod, "nodes", {})
        self.ast_mod = ast_mod
        self.safe_parsing = safe_parsing
        # Synthesize a stand-in exception type when the AST module does not
        # declare its own ParseError.
        self.ParseError = getattr(
            self.ast_mod, "ParseError", type("ParseError", (Exception,), {})
        )

    def find(self, name, node, *args, **kwargs):
        """Collect descendants of ``node`` matching the node-class ``name``."""
        # Known names get a strict (exact-type) Selector; unknown names fall
        # back to matching any node_cls subclass whose class name equals it.
        if self.nodes and name in self.nodes:
            ast_cls = self.nodes[name]
            strict_selector = True
        else:
            ast_cls = self.node_cls
            strict_selector = False
        selector = Selector(
            ast_cls, target_cls_name=name, strict=strict_selector, *args, **kwargs
        )
        selector.visit(node, head=True)
        return selector.out

    def select(self, spec, node):
        """Walk ``spec`` (dotted path or list of str/int steps) into ``node``.

        String steps do mapping/attribute lookups, integer steps index into
        sequences; returns None as soon as a step cannot be resolved.
        """
        result = node
        if isinstance(spec, tuple):
            raise ValueError(
                "This dispatcher currently doesn't support tuple specs for select"
            )
        if isinstance(spec, str):
            spec = self._path_str_to_list(spec)
        for step in spec:
            if isinstance(step, str):
                # NOTE(review): ``Mapping`` comes from the deprecated
                # ``from collections import Mapping`` at the top of the file;
                # that import breaks on Python 3.10+ (use collections.abc).
                if isinstance(result, Mapping):
                    result = result.get(step, None)
                else:
                    result = getattr(result, step, None)
            elif isinstance(step, int):
                # NOTE(review): assumes a non-negative index; a negative step
                # passes the length check yet indexes from the end.
                result = result[step] if len(result) > step else None
            if result is None:
                break
        return result

    def parse(self, code):
        """Parse ``code`` strictly; on ParseError return the error object when
        safe_parsing is enabled, otherwise re-raise it."""
        try:
            return self.ast_mod.parse(code, strict=True)
        except self.ParseError as e:
            if self.safe_parsing:
                return e
            else:
                raise e

    def describe(self, node, msg, field="", **kwargs):
        """Render a message about ``node`` via the AST module's ``speaker``.

        Returns None when ``ast_mod`` has no speaker.  An ``index`` kwarg is
        turned into an ordinal phrase ("2nd entry in the ..." when a field is
        given, otherwise just "2nd ").
        """
        speaker = getattr(self.ast_mod, "speaker", None)
        if kwargs.get("index") is not None:
            phrase = "{} entry in the " if field else "{} "
            # get_ord expects a 1-based ordinal.
            kwargs["index"] = phrase.format(get_ord(kwargs["index"] + 1))
        else:
            kwargs["index"] = ""
        if speaker:
            return self.ast_mod.speaker.describe(node, field=field, fmt=msg, **kwargs)

    @classmethod
    def from_module(cls, mod):
        """Build a Dispatcher from a module object or an importable name."""
        if isinstance(mod, str):
            mod = importlib.import_module(mod)
        ast_nodes = getattr(mod, "nodes", None)
        if ast_nodes is None:
            # Fall back to scanning the module for AstNode subclasses.
            ast_nodes = {
                k: v
                for k, v in vars(mod).items()
                if (inspect.isclass(v) and issubclass(v, mod.AstNode))
            }
        dispatcher = cls(mod.AstNode, nodes=ast_nodes, ast_mod=mod)
        return dispatcher
from pathlib import Path
def check_file(
    state,
    path,
    missing_msg="Вы создали файл `{}`?",
    is_dir_msg="Хотите проверить файл `{}`, но нашли каталог.",
    parse=True,
    solution_code=None,
):
    """Test whether file exists, and make its contents the student code.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        path: expected location of the file
        missing_msg: feedback message if no file is found in the expected location
        is_dir_msg: feedback message if the path is a directory instead of a file
        parse: If ``True`` (the default) the content of the file is interpreted as code in the main exercise technology.
            This enables more checks on the content of the file.
        solution_code: this argument can be used to pass the expected code for the file
            so it can be used by subsequent checks.

    Returns:
        A child state whose student code is the file's content.

    Note:
        This SCT fails if the file is a directory.

    :Example:
        To check if a user created the file ``my_output.txt`` in the subdirectory ``resources``
        of the directory where the exercise is run, use this SCT::

            Ex().check_file("resources/my_output.txt", parse=False)
    """
    path_obj = Path(path)
    # presumably state.report raises/aborts on failure, so execution only
    # continues when the path is an existing regular file -- TODO confirm.
    if not path_obj.exists():
        state.report(missing_msg.format(path))  # test file exists
    if path_obj.is_dir():
        state.report(is_dir_msg.format(path))  # test its not a dir
    code = get_file_content(path_obj)
    # ``False`` (as opposed to None) marks the AST as intentionally unparsed.
    sol_kwargs = {"solution_code": solution_code, "solution_ast": None}
    if solution_code:
        sol_kwargs["solution_ast"] = (
            state.parse(solution_code, test=False) if parse else False
        )
    child_state = state.to_child(
        append_message="Мы проверили файл `{}`. ".format(path),
        student_code=code,
        student_ast=state.parse(code) if parse else False,
        **sol_kwargs
    )
    child_state.path = path_obj  # .parent + .name
    return child_state
def has_dir(state, path, msg="Вы создали каталог `{}`?"):
    """Test whether a directory exists.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        path: expected location of the directory
        msg: feedback message if no directory is found in the expected location

    :Example:
        To check if a user created the subdirectory ``resources``
        in the directory where the exercise is run, use this SCT::

            Ex().has_dir("resources")
    """
    directory = Path(path)
    if not directory.is_dir():
        state.report(msg.format(path))
    return state
# helper functions


def load_file(relative_path, prefix=""):
    """Return the content of the file at ``prefix``/``relative_path``.

    The ``prefix`` can be partialed so the common part of paths does not
    need to be repeated at every call site.
    """
    path = Path(prefix, relative_path)
    return get_file_content(path)


def get_file_content(path):
    """Return the UTF-8 text content of ``path``, or ``None`` when the file
    cannot be read or decoded.

    ``path`` may be a ``Path`` or anything ``Path()`` accepts.
    """
    if not isinstance(path, Path):
        path = Path(path)
    try:
        content = path.read_text(encoding="utf-8")
    except (OSError, UnicodeDecodeError):
        # Previously a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; best-effort reading only needs to
        # tolerate I/O and decoding failures.
        content = None
    return content
from __future__ import absolute_import, unicode_literals
import collections
from .utils import bigrams, find_shortest_path
class NoConvertPath(Exception):
    """Raised when no chain of registered conversions links two tagsets."""


class Registry(object):
    """Directed graph of tagset conversions.

    ``_registry[type_from][type_to]`` holds the function that converts a tag
    from ``type_from`` to ``type_to``.
    """

    def __init__(self):
        self._registry = collections.defaultdict(dict)

    def add(self, type_from, type_to, method):
        """Register ``method`` as the conversion from ``type_from`` to
        ``type_to``.

        ``method`` must accept an object of type ``type_from`` and return an
        object of type ``type_to``.
        """
        self._registry[type_from][type_to] = method

    def path(self, type_from, type_to):
        """Return the list of conversion steps from ``type_from`` to
        ``type_to``; raises NoConvertPath when they are not connected."""
        route = find_shortest_path(self._registry, type_from, type_to)
        if route is None:
            raise NoConvertPath()
        return route

    def steps(self, type_from, type_to):
        """Yield, in order, the conversion functions to apply to translate
        from ``type_from`` to ``type_to``."""
        for src, dst in bigrams(self.path(type_from, type_to)):
            yield self._registry[src][dst]

    def convert(self, obj, type_from, type_to, word=None):
        """Run ``obj`` through every step from ``type_from`` to ``type_to``,
        forwarding ``word`` to each conversion function."""
        for step in self.steps(type_from, type_to):
            obj = step(obj, word)
        return obj

    def get_supported(self):
        """Return all directly registered (type_from, type_to) pairs."""
        return [
            (type_from, type_to)
            for type_from in self._registry
            for type_to in self._registry[type_from]
        ]
# Module-wide registry backing the convenience functions below.
_registry = Registry()


class ConversionError(Exception):
    """Raised when a conversion fails."""


def add(type_from, type_to, method):
    """Register ``method`` as the conversion from ``type_from`` to ``type_to``
    in the module-wide registry.

    ``method`` must accept an object of type ``type_from`` and return an
    object of type ``type_to``.
    """
    _registry.add(type_from, type_to, method)


def steps(type_from, type_to):
    """Return the conversion functions to apply, in order, to translate from
    ``type_from`` to ``type_to``; raises ConversionError when conversion is
    not possible."""
    return _registry.steps(type_from, type_to)


def convert(obj, type_from, type_to, word=None):
    """Convert ``obj`` from ``type_from`` to ``type_to``, optionally using
    ``word``."""
    result = obj
    for func in steps(type_from, type_to):
        result = func(result, word)
    return result


def get_supported():
    """Return a list of directly supported conversions."""
    return _registry.get_supported()


def converter(type_from, type_to):
    """Build and return a one-argument conversion function bound to the given
    source and target tagsets."""
    def conversion_func(tag, word=None):
        return convert(tag, type_from, type_to, word)
    conversion_func.__doc__ = """
    Converts ``tag`` with optional ``word`` from '%s' to '%s'.
    """ % (type_from, type_to)
    return conversion_func
from __future__ import absolute_import, unicode_literals, print_function
import collections
import array
# Schema for one slot of the 16-character positional tag: its 1-based
# position, single-letter code, short and full names, and the mapping of the
# characters allowed in that slot to their human-readable meanings.
TagInfo = collections.namedtuple('TagInfo', 'position letter name full_name values')

# The full positional tagset definition, one TagInfo per tag position.
# The list index equals TagInfo.position - 1; '-' (unset) is always allowed
# and therefore not listed in any ``values`` mapping.
TAGS_POSITIONS = [
    TagInfo(1, 'p', 'POS', 'Part of Speech', {
        'N': 'Noun',
        'A': 'Adjective',
        'P': 'Pronoun',
        'C': 'Numeral',
        'V': 'Verb',
        'D': 'Adverb',
        'R': 'Preposition',
        'J': 'Conjunction',
        'I': 'Interjection',
        'T': 'Particle',
        'Z': 'Punctuation',
        'X': 'Unknown, special use',
    }),
    TagInfo(2, 's', 'subPOS', 'SubPOS (Detailed Part of Speech)', {
        '#': 'Z: Sentence boundary',
        ',': 'J: Subordinate conjunction (esli, čto, kotoryj)',
        '0': 'X: Part of a multiword foreign phrase',
        '5': 'P: 3rd person pronoun in prepositional forms (nego, nej, ...)',
        ':': 'Z: Punctuation',
        '=': 'C: Number written using digits',
        'A': 'A: Adjective (long, non-participle) (xorosij, ploxoj)',
        'B': 'V: Verb in present, past or rarely future form (čitaju, splju, pišum, spal, ždal)',
        'C': 'A: Short adjective (non-participle) (surov, krasiv)',
        'D': 'P: Pronoun demonstrative (ètot, tot, sej, takoj, èkij, ... )',
        'F': 'R: Part of a preposition; never appears isolated (nesmotrja)',
        'G': 'A: Participle, active or long passive (čitajuscij, čitavsij, pročitavšij, čitaemyj; but not pročitannyj (AA), pročitan (Ac)',
        'I': 'I: Interjection (oj, aga, m-da)',
        'N': 'N: Noun',
        'P': 'P: Personal pronoun (ja, my, ty, vy, on, ona, ono, oni, sebja)',
        'Q': 'P: Relative/interrogative pronoun with nominal declension (kto, čto)',
        'R': 'R: Nonvocalized preposition (ob, pered, s, v, ...)',
        'S': 'P: Possessive pronoun (moj, ego, svoj, ..)',
        'T': 'T: Particle (li)',
        'U': "A: Possessive adjective (mamin, oveč'ju)",
        'V': 'R: Vocalized preposition (obo, peredo, so, vo, ...)',
        'W': 'P: Negative pronoun with nominal declension (nicto, nikto)',
        'X': 'X: Unknown, special use',
        'Z': "P: Indefinite pronoun with nominal declension (kto-to, kto-nibud', cto-to, ...)",
        '^': 'J: Non-subordinate conjunction (i, a, xotja, pricem)',
        'a': "C: Indefinite numeral (mnogo, neskol'ko)",
        'b': 'D: Adverb without a possibility to form negation and degrees of comparison (vverxu, vnizu, potom)',
        'c': 'A: Short passive participle (procitan)',
        'e': 'V: Gerund (delaja; pridja, otpisav)',
        'f': "V: Infinitive (delat', spat')",
        'g': 'D: Adverb forming negation and degrees of comparison (vysoko, daleko)',
        'i': 'V: Imperative (spi, sdelaj, pročti)',
        'j': 'C: Generic/collective numeral (dvoje, četvero)',
        'n': 'C: Cardinal numeral (odin, tri, sorok)',
        'q': 'P: Relative/interrogative pronoun with adjectival declension (kakoj, kotoryj, cej, ...)',
        'r': 'C: Ordinal numeral (pervyj, tretij)',
        'u': "C: Interrogative numeral (skol'ko)",
        'v': 'C: Multiplicative numeral (dvaždy, triždy)',
        'w': 'P: Negative pronoun with adjectival declension (nikakoj, nicej)',
        'z': "P: Indefinite pronoun with adjectival declension (samyj, ves', ...)",
        '}': 'C: Number written using Roman numerals (XIV)',
    }),
    TagInfo(3, 'g', 'gender', 'Gender', {
        'F': 'Feminine',
        'M': 'Masculine',
        'N': 'Neuter',
        'X': 'Any gender',
    }),
    TagInfo(4, 'y', 'animacy', 'Animacy', {
        'A': 'Animate',
        'I': 'Inanimate',
        'X': 'Either',
    }),
    TagInfo(5, 'n', 'number', 'Number', {
        'P': 'Plural',
        'S': 'Singular',
        'X': 'Any number'
    }),
    TagInfo(6, 'c', 'case', 'Case', {
        '1': 'Nominative',
        '2': 'Genitive',
        '3': 'Dative',
        '4': 'Accusative',
        '6': 'Locative',
        '7': 'Instrumental',
        'X': 'Any case'
    }),
    TagInfo(7, 'f', 'possessors_gender', "Possessor's Gender", {
        'F': 'Feminine possessor',
        'M': 'Masculine possessor',
        'N': 'Neuter possessor',
        'X': 'Possessor of any gender',
    }),
    TagInfo(8, 'm', 'possessors_number', "Possessor's Number", {
        'S': 'Singular possessor',
        'P': 'Plural possessor',
    }),
    TagInfo(9, 'e', 'person', 'Person', {
        '1': '1st person',
        '2': '2nd person',
        '3': '3rd person',
        'X': 'Any person',
    }),
    TagInfo(10, 'r', 'reflexivity', 'Reflexivity', {
        'I': 'Irreflexive',
        'R': 'Reflexive',
    }),
    TagInfo(11, 't', 'tense', 'Tense', {
        'F': 'Future',
        'P': 'Present',
        'R': 'Past',
        'X': 'Any (Past, Present, or Future)',
    }),
    TagInfo(12, 'b', 'verbal_aspect', 'Verbal aspect', {
        'P': 'perfective',
        'I': 'imperfective',
        'X': 'either aspect',
    }),
    TagInfo(13, 'd', 'degree_of_comparison', 'Degree of comparison', {
        '1': 'Positive',
        '2': 'Comparative',
        '3': 'Superlative',
    }),
    TagInfo(14, 'a', 'negation', 'Negation', {
        'A': 'Affirmative (not negated)',
        'N': 'Negated',
    }),
    TagInfo(15, 'v', 'voice', 'Voice', {
        'A': 'Active',
        'P': 'Passive',
    }),
    TagInfo(16, 'i', 'variant', 'Variant, Abbreviation', {
        '1': 'Variant (generally less frequent)',
        '2': 'Variant (generally rarely used, bookish, or archaic)',
        '3': 'Variant (very archaic)',
        '5': 'Variant (colloquial)',
        '6': 'Variant (colloquial, generally less frequent)',
        '7': 'Variant (colloquial, generally less frequent)',
        '8': 'Abbreviations'
    }),
]
def _fget(ind):
def fget(self):
return self._data[ind]
return fget
def _fset(ind):
def fset(self, value):
if value != '-' and value not in TAGS_POSITIONS[ind].values:
raise ValueError('Invalid value %s' % value)
self._data[ind] = value
return fset
def _prop(ind):
return _fget(ind), _fset(ind)
class Tag(object):
    """A 16-character positional tag (one character per TAGS_POSITIONS slot).

    The tag is stored as a mutable array of unicode characters; '-' marks an
    unset position.  Each position is exposed as a validated property built
    by ``_prop``.
    """

    def __init__(self, txt='-'*16):
        if isinstance(txt, bytes):
            txt = txt.decode('ascii')
        # NOTE(review): the 'u' array typecode is deprecated in CPython and
        # scheduled for removal; a list of 1-char strings would be a safer
        # backing store -- confirm before upgrading the interpreter.
        self._data = array.array(str('u'), txt)

    # One validated property per tag position (see _prop / _fget / _fset).
    mainPOS = property(*_prop(0))
    subPOS = property(*_prop(1))
    gender = property(*_prop(2))
    animacy = property(*_prop(3))
    number = property(*_prop(4))
    case = property(*_prop(5))
    possessors_gender = property(*_prop(6))
    possessors_number = property(*_prop(7))
    person = property(*_prop(8))
    reflexivity = property(*_prop(9))
    tense = property(*_prop(10))
    verbal_aspect = property(*_prop(11))
    degree_of_comparison = property(*_prop(12))
    negation = property(*_prop(13))
    voice = property(*_prop(14))
    variant = property(*_prop(15))

    # 2-letter POS: main POS character plus detailed sub-POS character.
    def _get_pos(self):
        return self._data[0:2].tounicode()

    def _set_pos(self, txt):
        # Setting via the two validated single-character properties.
        self.mainPOS, self.subPOS = txt[0], txt[1]

    POS = property(_get_pos, _set_pos)

    def is_valid(self):
        """Return True when the tag is 16 characters long and every non-'-'
        character is allowed at its position."""
        if len(self._data) != 16:
            return False
        for index, c in enumerate(self):
            if c == '-':
                continue
            if c not in TAGS_POSITIONS[index].values:
                return False
        return True

    def verbose_info(self):
        """Return {position name: human-readable meaning} for every set
        (non-'-') position of the tag."""
        return dict((
            (TAGS_POSITIONS[index].name, TAGS_POSITIONS[index].values[tag])
            for index, tag in enumerate(self) if tag != '-'
        ))

    def __iter__(self):
        return iter(self._data)

    def __str__(self):
        return self._data.tounicode()  # this is not correct under python 2.x

    def __repr__(self):
        return 'Tag("%s")' % self
# Smoke test: print the verbose description of a sample tag when this module
# is run directly.
if __name__ == '__main__':
    print(Tag("NNFIS7-------A--").verbose_info())
from pathlib import Path
import re, string, itertools, marisa_trie
from russian_uncensor.rd_wr_util import rd_wr_module
path_current_file = Path(__file__).parent
class Uncensor:
    """Recover obscene words hidden by '*' masking or by splitting across
    tokens, using letter / bi-gram / tri-gram statistics of an obscene-word
    vocabulary stored as marisa tries."""

    def __init__(self, dict_path=None, freq_letter_fn=None, bi_grams_fn=None, tri_grams_fn=None):
        """ Init Uncensor class.
        :param dict_path: path to dictionaries directory.
        :param freq_letter_fn: frequent letters in obscene words filename.
        :param bi_grams_fn: bi-grams in obscene words filename.
        :param tri_grams_fn: tri-grams in obscene words filename.
        :return:
        """
        # Paths:
        self.dict_path = Path.joinpath(path_current_file, Path('data')) if dict_path is None else dict_path
        self.freq_letters_fn = self.dict_path/'ngrams/freq_letters.marisa' \
            if freq_letter_fn is None else self.dict_path/freq_letter_fn
        self.bi_grams_fn = self.dict_path/'ngrams/bi_grams.marisa' \
            if bi_grams_fn is None else self.dict_path/bi_grams_fn
        self.tri_grams_fn = self.dict_path/'ngrams/tri_grams.marisa' \
            if tri_grams_fn is None else self.dict_path/tri_grams_fn
        # Dictionaries: each may come either as a plain-text list (.txt) or a
        # pre-built marisa trie (.marisa); any other extension leaves the
        # attribute unset.
        if str(self.freq_letters_fn)[-4:] == '.txt':
            self.freq_letters = marisa_trie.Trie(rd_wr_module(path_dict=self.freq_letters_fn))
        elif str(self.freq_letters_fn)[-7:] == '.marisa':
            self.freq_letters = marisa_trie.Trie().load(self.freq_letters_fn)
        if str(self.bi_grams_fn)[-4:] == '.txt':
            self.bi_grams = marisa_trie.Trie(rd_wr_module(path_dict=self.bi_grams_fn))
        elif str(self.bi_grams_fn)[-7:] == '.marisa':
            self.bi_grams = marisa_trie.Trie().load(self.bi_grams_fn)
        if str(self.tri_grams_fn)[-4:] == '.txt':
            self.tri_grams = marisa_trie.Trie(rd_wr_module(path_dict=self.tri_grams_fn))
        elif str(self.tri_grams_fn)[-7:] == '.marisa':
            self.tri_grams = marisa_trie.Trie().load(self.tri_grams_fn)
        # Parameters:
        self.win_len = 3  # sliding-window length (trigram size)
        self.delimiters = string.punctuation  # characters normalized to '*'

    def moving_window(self, seq):
        """ Moving window with length equal 3 letters and step 1 letter to divide word on windows.
        :param seq: input word.
        :return: divided part of word - window (tuple of win_len characters).
        """
        iterator = iter(seq)
        result = tuple(itertools.islice(iterator, self.win_len))
        if len(result) == self.win_len:
            yield result
        for elem in iterator:
            # Slide by one: drop the head character, append the next one.
            result = result[1:] + (elem, )
            yield result

    def find_variants(self, word):
        """ Find all possible bi- and tri-grams in masked places of the word.
        :param word: input word.
        :return: dict mapping masked positions in ``word`` to their candidate
            letters, or None when the word contains no '*' at all.
        """
        if word.find('*') != -1:
            tri_grams_slices = [''.join(gram) for gram in self.moving_window(seq=word)]
            n_win = 0
            possible_letters = dict()
            for gram in tri_grams_slices:
                if gram.find('*') != -1:
                    # Window-local indices of masked vs. visible characters.
                    ind_symbol = [s.start() for s in re.finditer('\*', gram)]
                    ind_enable = [i for i in range(self.win_len) if i not in ind_symbol]
                    letters_cond = [gram[ind] if ind in ind_enable else '' for ind in range(self.win_len)]
                    # Trigrams compatible with the visible letters
                    # ('' in x is always True, so masked slots match anything).
                    variants = [tri_gram for tri_gram in self.tri_grams if letters_cond[0] in tri_gram[0] and
                                letters_cond[1] in tri_gram[1] and letters_cond[2] in tri_gram[2]]
                    for ind in ind_symbol:
                        # Keep only candidates that are frequent obscene letters;
                        # ind + n_win maps back to the position in the full word.
                        letters = [*set([var[ind] for var in variants if var[ind] in self.freq_letters])]
                        if len(letters) > 0:
                            possible_letters.update({ind + n_win: letters})
                n_win += 1
            return possible_letters
        return None

    def uncensor_masked(self, word):
        """ Find obscene words in hidden (masked) text.
        :param word: input text.
        :return: (True, list of unmasked candidate strings) when the word was
            masked, otherwise (False, normalized word).
        """
        word = word.lower()
        # Normalize every punctuation character to the '*' mask symbol.
        word = word.translate({ord(ch): "*" for ch in self.delimiters})
        variants = self.find_variants(word=word)
        if variants is None:
            return False, word
        k = list(variants.keys())
        k.append(0)
        uncensored_variants = list()
        # Group runs of consecutive masked positions: a run of 2 is filled
        # from bi-grams, a run of 3 from tri-grams, isolated positions from
        # their single-letter candidates.
        particles = list()
        ind_particles = list()
        particles_tmp = None
        ind_tmp = None
        cnt_sequence = 0
        for i in range(len(k) - 1):
            if k[i + 1] - k[i] == 1:
                cnt_sequence += 1
            else:
                if cnt_sequence > 0:
                    particles.append(particles_tmp)
                    ind_particles.append(ind_tmp)
                else:
                    particles.append(variants[k[i]])
                    ind_particles.append(k[i])
                cnt_sequence = 0
            if cnt_sequence == 1:
                particles_tmp = [bi_gram for bi_gram in self.bi_grams if bi_gram[0] in variants[k[i]] and
                                 bi_gram[1] in variants[k[i + 1]]]
                ind_tmp = [k[i], k[i + 1]]
            elif cnt_sequence == 2:
                particles_tmp = [tri_gram for tri_gram in self.tri_grams if tri_gram[0] in variants[k[i - 1]] and
                                 tri_gram[1] in variants[k[i]] and tri_gram[2] in variants[k[i + 1]]]
                ind_tmp = [k[i - 1], k[i], k[i + 1]]
        if len(ind_particles) > 1:
            # Cartesian product over the candidate sets of every masked run.
            combs = list(itertools.product(*particles))
            for comb in combs:
                word_listed = list(word)
                order = 0
                for ind in ind_particles:
                    try:
                        sub_order = 0
                        for sub_ind in ind:
                            word_listed[sub_ind] = comb[order][sub_order]
                            sub_order += 1
                    except TypeError:
                        # ``ind`` is a single int position, not a run of them.
                        word_listed[ind] = comb[order]
                    order += 1
                uncensored_variants.append(''.join(word_listed))
        else:
            for comb in particles[0]:
                word_listed = list(word)
                try:
                    ind_tmp = 0
                    for ind in ind_particles[0]:
                        word_listed[ind] = comb[ind_tmp]
                        ind_tmp += 1
                    uncensored_variants.append(''.join(word_listed))
                except TypeError:
                    # Single masked position: ``comb`` is one letter.
                    word_listed[ind_particles[0]] = comb
                    uncensored_variants.append(''.join(word_listed))
        return True, uncensored_variants

    def uncensor_splitted(self, sequence):
        """ Find obscene words in splitted text.
        :param sequence: input text.
        :return: list of (joined_string, word_index_list) candidate unions of
            adjacent tokens whose boundary letters form a known bi-gram.
        """
        words = re.split(f'[{self.delimiters} ]', sequence.lower())
        variants = list()
        for i in range(len(words) - 1):
            # Consume tokens from the front, extending the join while the
            # boundary letters of adjacent tokens form a frequent bi-gram.
            prev_word = words[0]
            sentence_str = prev_word
            ind = i
            ind_words = [ind]
            words.pop(0)
            for word in words:
                if prev_word[-1] in self.freq_letters and word[0] in self.freq_letters and prev_word[-1] + word[0] in self.bi_grams:
                    sentence_str += word
                    ind_words.append(ind + 1)
                    variants.append((sentence_str, ind_words.copy()))
                    ind += 1
                    prev_word = word
                else:
                    break
        return variants
import os, marisa_trie
from pathlib import Path
from collections import Counter
from russian_uncensor.rd_wr_util import rd_wr_module
path_current_file = Path(__file__).parent
class WordStats:
    """Compute frequent-letter / bi-gram / tri-gram statistics over an
    obscene-word list and persist them as .txt files or marisa tries."""

    def __init__(self, dict_path=None, neg_words_fn=None, freq_letters_fn=None, bigrams_fn=None, trigrams_fn=None,
                 ext='.marisa', debug=False):
        """ Init WordStats class.
        :param dict_path: common path to data files dir (str).
        :param neg_words_fn: obscene components file name (str).
        :param freq_letters_fn: frequent letters used in obscene words file name (str).
        :param bigrams_fn: frequent bi-grams used in obscene words file name (str).
        :param trigrams_fn: frequent tri-grams used in obscene words file name (str).
        :param ext: extension of output files. .txt or .marisa (str).
        :param debug: turn on instruments for debug (bool).
        :return:
        """
        # File paths:
        self.dict_path = Path.joinpath(path_current_file, Path('data')) if dict_path is None else dict_path
        self.ext = ext
        self.neg_words_filename = self.dict_path/f'obscene_words.marisa' \
            if neg_words_fn is None else self.dict_path/neg_words_fn
        self.frequent_letters_filename = self.dict_path/f'ngrams/freq_letters{self.ext}' \
            if freq_letters_fn is None else self.dict_path/freq_letters_fn
        self.bi_grams_filename = self.dict_path/f'ngrams/bi_grams{self.ext}' \
            if bigrams_fn is None else self.dict_path/bigrams_fn
        self.tri_grams_filename = self.dict_path/f'ngrams/tri_grams{self.ext}' \
            if trigrams_fn is None else self.dict_path/trigrams_fn
        # Create the output directory if it does not exist yet.
        if not os.path.exists(path=self.dict_path/'ngrams'):
            os.mkdir(path=self.dict_path/'ngrams')
        # Others:
        # NOTE(review): this alphabet omits 'ъ' and 'ь' -- confirm intentional.
        self.ru_alphabet = set('абвгдеёжзийклмнопрстуфхцчшщыэюя')
        self.debug = debug

    def frequent_letters_stat(self):
        """ Get counter of the frequent letters in obscene words.
        :return: letters found in the obscene-word list, ordered from most to
            least frequent (list of str).
        """
        frequent_letters_cnt = Counter()
        # The word list may be a plain .txt or a pre-built marisa trie.
        if str(self.neg_words_filename)[-4:] == '.txt':
            neg_words = marisa_trie.Trie(rd_wr_module(self.neg_words_filename))
        elif str(self.neg_words_filename)[-7:] == '.marisa':
            neg_words = marisa_trie.Trie().load(self.neg_words_filename)
        for word in neg_words:
            for letter in word:
                if letter in self.ru_alphabet:
                    frequent_letters_cnt.update([letter])
        # Keep only the letters, ordered by descending frequency.
        frequent_letters_cnt = list(dict(frequent_letters_cnt.most_common()))
        return frequent_letters_cnt

    def bi_grams_stat(self):
        """ Get counter of the bi-grams in obscene words.
        :return: bi-grams ordered from most to least frequent (list of str).
        """
        bigrams_cnt = Counter()
        if str(self.neg_words_filename)[-4:] == '.txt':
            neg_words = marisa_trie.Trie(rd_wr_module(self.neg_words_filename))
        elif str(self.neg_words_filename)[-7:] == '.marisa':
            neg_words = marisa_trie.Trie().load(self.neg_words_filename)
        for word in neg_words:
            for i in range(len(word) - 1):
                # Count only pairs made entirely of Russian letters.
                if word[i] in self.ru_alphabet and word[i + 1] in self.ru_alphabet:
                    bigrams_cnt.update([word[i] + word[i + 1]])
        bigrams_cnt = list(dict(bigrams_cnt.most_common()))
        return bigrams_cnt

    def tri_grams_stat(self):
        """ Get counter of the tri-grams in obscene words.
        :return: tri-grams ordered from most to least frequent (list of str).
        """
        trigrams_cnt = Counter()
        if str(self.neg_words_filename)[-4:] == '.txt':
            neg_words = marisa_trie.Trie(rd_wr_module(self.neg_words_filename))
        elif str(self.neg_words_filename)[-7:] == '.marisa':
            neg_words = marisa_trie.Trie().load(self.neg_words_filename)
        for word in neg_words:
            for i in range(len(word) - 2):
                # Count only triples made entirely of Russian letters.
                if word[i] in self.ru_alphabet and word[i + 1] in self.ru_alphabet and word[i + 2] in self.ru_alphabet:
                    trigrams_cnt.update([word[i] + word[i + 1] + word[i + 2]])
        trigrams_cnt = list(dict(trigrams_cnt.most_common()))
        return trigrams_cnt

    def get_n_grams(self):
        """ Get tuple of frequent letters, bi-grams and tri-grams based on obscene words.
        :return: (frequent letters, bi-grams, tri-grams).
        """
        return self.frequent_letters_stat(), self.bi_grams_stat(), self.tri_grams_stat()

    def save_n_grams(self):
        """ Save the computed n-gram statistics to disk, in ``self.ext``
        format (.txt lists or marisa tries).
        :return: None.
        """
        filenames = [self.frequent_letters_filename, self.bi_grams_filename, self.tri_grams_filename]
        n_grams = self.get_n_grams()
        for group in zip(n_grams, filenames):
            if self.debug:
                print(f'Filename: {group[1]} Content: {group[0]}')
            if self.ext == '.txt':
                rd_wr_module(path_dict=group[1], input_dict=group[0], mode='w')
            elif self.ext == '.marisa':
                marisa_trie.Trie(group[0]).save(group[1])
import asyncio
import re
import logging
# Maintain compat with various 3.x async changes:
# asyncio.ensure_future replaced asyncio.async (a keyword clash) in 3.4.4.
if hasattr(asyncio, 'ensure_future'):
    ensure_future = asyncio.ensure_future
else:
    # 'async' became a keyword, so the legacy name must be fetched dynamically.
    ensure_future = getattr(asyncio, 'async')

logger = logging.getLogger('russound')

# Matches RIO responses of the form S[src].var="value" or
# C[ctrl].Z[zone].var="value", capturing the source/controller/zone ids,
# the variable name and its quoted value.
_re_response = re.compile(
    r"(?:(?:S\[(?P<source>\d+)\])|(?:C\[(?P<controller>\d+)\]"
    r".Z\[(?P<zone>\d+)\]))\.(?P<variable>\S+)=\"(?P<value>.*)\"")
class CommandException(Exception):
    """Raised when the controller reports an error for a sent command."""
class UncachedVariable(Exception):
    """Raised when a requested variable is not present in the local cache."""
class ZoneID:
    """Uniquely identifies a zone.

    Russound controllers can be linked together to expand the total zone
    count, so a zone is addressed by its zone index (1-N) within its
    controller plus the controller index (1-N) within the whole system.
    """

    def __init__(self, zone, controller=1):
        self.zone = int(zone)
        self.controller = int(controller)

    def __str__(self):
        return "%d:%d" % (self.controller, self.zone)

    def __eq__(self, other):
        # Duck-typed comparison: anything exposing matching zone/controller
        # attributes compares equal.
        return (
            hasattr(other, 'zone')
            and hasattr(other, 'controller')
            and (other.zone, other.controller) == (self.zone, self.controller)
        )

    def __hash__(self):
        return hash(str(self))

    def device_str(self):
        """
        Generate a string that can be used to reference this zone in a RIO
        command, e.g. ``C[1].Z[2]``.
        """
        return "C[%d].Z[%d]" % (self.controller, self.zone)
class Russound:
"""Manages the RIO connection to a Russound device."""
    def __init__(self, loop, host, port=9621):
        """
        Initialize the Russound object using the event loop, host and port
        provided.
        """
        self._loop = loop
        self._host = host
        self._port = port
        # Future of the background reader/writer task started by connect().
        self._ioloop_future = None
        # Outgoing commands as (cmd, future) pairs consumed by _ioloop.
        self._cmd_queue = asyncio.Queue()
        # Caches of last known variable values, keyed by source id / ZoneID.
        self._source_state = {}
        self._zone_state = {}
        self._watched_zones = set()
        self._watched_sources = set()
        # Callbacks invoked on every cached variable change.
        self._zone_callbacks = []
        self._source_callbacks = []
    def _retrieve_cached_zone_variable(self, zone_id, name):
        """
        Retrieves the cache state of the named variable for a particular
        zone. If the variable has not been cached then the UncachedVariable
        exception is raised.
        """
        # Variable names are case-insensitive; the cache is keyed lowercase.
        try:
            s = self._zone_state[zone_id][name.lower()]
            logger.debug("Zone Cache retrieve %s.%s = %s",
                         zone_id.device_str(), name, s)
            return s
        except KeyError:
            raise UncachedVariable
    def _store_cached_zone_variable(self, zone_id, name, value):
        """
        Stores the current known value of a zone variable into the cache.
        Calls any zone callbacks.
        """
        zone_state = self._zone_state.setdefault(zone_id, {})
        name = name.lower()
        zone_state[name] = value
        logger.debug("Zone Cache store %s.%s = %s",
                     zone_id.device_str(), name, value)
        # Notify subscribers with (zone_id, variable_name, new_value).
        for callback in self._zone_callbacks:
            callback(zone_id, name, value)
    def _retrieve_cached_source_variable(self, source_id, name):
        """
        Retrieves the cache state of the named variable for a particular
        source. If the variable has not been cached then the UncachedVariable
        exception is raised.
        """
        # Variable names are case-insensitive; the cache is keyed lowercase.
        try:
            s = self._source_state[source_id][name.lower()]
            logger.debug("Source Cache retrieve S[%d].%s = %s",
                         source_id, name, s)
            return s
        except KeyError:
            raise UncachedVariable
    def _store_cached_source_variable(self, source_id, name, value):
        """
        Stores the current known value of a source variable into the cache.
        Calls any source callbacks.
        """
        source_state = self._source_state.setdefault(source_id, {})
        name = name.lower()
        source_state[name] = value
        logger.debug("Source Cache store S[%d].%s = %s",
                     source_id, name, value)
        # Notify subscribers with (source_id, variable_name, new_value).
        for callback in self._source_callbacks:
            callback(source_id, name, value)
    def _process_response(self, res):
        """
        Decode a single response line from the controller.

        Returns ``(type_char, value)`` where ``type_char`` is the leading
        response character ('E' raises CommandException; 'S' is treated as a
        command acknowledgement by _ioloop) and ``value`` is the parsed
        variable value, or None when the payload is not a variable
        assignment.  Recognized zone/source assignments are stored into the
        local caches as a side effect.
        """
        s = str(res, 'utf-8').strip()
        # NOTE(review): an empty line (e.g. b'' at EOF from readline) makes
        # s[0] raise IndexError here -- confirm the stream never yields one.
        ty, payload = s[0], s[2:]
        if ty == 'E':
            logger.debug("Device responded with error: %s", payload)
            raise CommandException(payload)
        m = _re_response.match(payload)
        if not m:
            return ty, None
        p = m.groupdict()
        if p['source']:
            source_id = int(p['source'])
            self._store_cached_source_variable(
                source_id, p['variable'], p['value'])
        elif p['zone']:
            zone_id = ZoneID(controller=p['controller'], zone=p['zone'])
            self._store_cached_zone_variable(zone_id,
                                             p['variable'],
                                             p['value'])
        return ty, p['value']
    async def _ioloop(self, reader, writer):
        """
        Background task multiplexing the command queue and the socket.

        Unsolicited responses update the caches; when a queued command is
        written, responses are drained until the matching 'S' (success)
        acknowledgement or an error resolves the command's future.
        """
        queue_future = ensure_future(
            self._cmd_queue.get())
        net_future = ensure_future(
            reader.readline())
        try:
            logger.debug("Starting IO loop")
            while True:
                done, pending = await asyncio.wait(
                    [queue_future, net_future],
                    return_when=asyncio.FIRST_COMPLETED)
                if net_future in done:
                    response = net_future.result()
                    try:
                        self._process_response(response)
                    except CommandException:
                        # Unsolicited errors have no pending command to fail.
                        pass
                    net_future = ensure_future(
                        reader.readline())
                if queue_future in done:
                    cmd, future = queue_future.result()
                    # RIO commands are CR-terminated.
                    cmd += '\r'
                    writer.write(bytearray(cmd, 'utf-8'))
                    await writer.drain()
                    queue_future = ensure_future(
                        self._cmd_queue.get())
                    # Drain responses until this command is acknowledged
                    # ('S') or rejected with an error.
                    while True:
                        response = await net_future
                        net_future = ensure_future(
                            reader.readline())
                        try:
                            ty, value = self._process_response(response)
                            if ty == 'S':
                                future.set_result(value)
                                break
                        except CommandException as e:
                            future.set_exception(e)
                            break
            logger.debug("IO loop exited")
        except asyncio.CancelledError:
            # close() cancels this task; tear down the socket and any
            # outstanding futures before propagating.
            logger.debug("IO loop cancelled")
            writer.close()
            queue_future.cancel()
            net_future.cancel()
            raise
        except Exception:
            logger.exception("Unhandled exception in IO loop")
            raise
    async def _send_cmd(self, cmd):
        """
        Queue ``cmd`` for the IO loop and wait for its response value.

        The future is resolved (or failed) by _ioloop once the controller
        acknowledges the command.
        """
        future = asyncio.Future()
        await self._cmd_queue.put((cmd, future))
        r = await future
        return r
    def add_zone_callback(self, callback):
        """
        Registers a callback to be called whenever a zone variable changes.
        The callback will be passed three arguments: the zone_id, the variable
        name and the variable value.
        """
        self._zone_callbacks.append(callback)
    def remove_zone_callback(self, callback):
        """
        Removes a previously registered zone callback.

        Raises ValueError if the callback was never registered.
        """
        self._zone_callbacks.remove(callback)
    def add_source_callback(self, callback):
        """
        Registers a callback to be called whenever a source variable changes.
        The callback will be passed three arguments: the source_id, the
        variable name and the variable value.
        """
        self._source_callbacks.append(callback)
    def remove_source_callback(self, source_id, callback):
        """
        Removes a previously registered source callback.

        Raises ValueError if the callback was never registered.

        NOTE(review): ``source_id`` is accepted but never used (callbacks are
        stored globally, not per source) -- kept for caller compatibility.
        """
        self._source_callbacks.remove(callback)
    async def connect(self):
        """
        Connect to the controller and start processing responses.
        """
        logger.info("Connecting to %s:%s", self._host, self._port)
        reader, writer = await asyncio.open_connection(
            self._host, self._port)
        # Run the reader/writer multiplexer in the background for the
        # lifetime of the connection; close() cancels it.
        self._ioloop_future = ensure_future(
            self._ioloop(reader, writer))
        logger.info("Connected")
    async def close(self):
        """
        Disconnect from the controller.
        """
        logger.info("Closing connection to %s:%s", self._host, self._port)
        self._ioloop_future.cancel()
        # Wait for the IO loop to finish its cleanup; the CancelledError it
        # re-raises is expected here.
        try:
            await self._ioloop_future
        except asyncio.CancelledError:
            pass
async def set_zone_variable(self, zone_id, variable, value):
"""
Set a zone variable to a new value.
"""
return self._send_cmd("SET %s.%s=\"%s\"" % (
zone_id.device_str(), variable, value))
    async def get_zone_variable(self, zone_id, variable):
        """ Retrieve the current value of a zone variable. If the variable is
        not found in the local cache then the value is requested from the
        controller. """
        try:
            return self._retrieve_cached_zone_variable(zone_id, variable)
        except UncachedVariable:
            # Cache miss: issue a GET command to the controller.
            return (await self._send_cmd("GET %s.%s" % (
                zone_id.device_str(), variable)))
    def get_cached_zone_variable(self, zone_id, variable, default=None):
        """ Retrieve the current value of a zone variable from the cache or
        return the default value if the variable is not present.

        Unlike get_zone_variable this never contacts the controller. """
        try:
            return self._retrieve_cached_zone_variable(zone_id, variable)
        except UncachedVariable:
            return default
async def watch_zone(self, zone_id):
""" Add a zone to the watchlist.
Zones on the watchlist will push all
state changes (and those of the source they are currently connected to)
back to the client """
r = await self._send_cmd(
"WATCH %s ON" % (zone_id.device_str(), ))
self._watched_zones.add(zone_id)
return r
async def unwatch_zone(self, zone_id):
""" Remove a zone from the watchlist. """
self._watched_zones.remove(zone_id)
return (await
self._send_cmd("WATCH %s OFF" % (zone_id.device_str(), )))
async def send_zone_event(self, zone_id, event_name, *args):
""" Send an event to a zone. """
cmd = "EVENT %s!%s %s" % (
zone_id.device_str(), event_name,
" ".join(str(x) for x in args))
return (await self._send_cmd(cmd))
async def enumerate_zones(self):
""" Return a list of (zone_id, zone_name) tuples """
zones = []
for controller in range(1, 8):
for zone in range(1, 17):
zone_id = ZoneID(zone, controller)
try:
name = await self.get_zone_variable(zone_id, 'name')
if name:
zones.append((zone_id, name))
except CommandException:
break
return zones
async def set_source_variable(self, source_id, variable, value):
""" Change the value of a source variable. """
source_id = int(source_id)
return self._send_cmd("SET S[%d].%s=\"%s\"" % (
source_id, variable, value))
async def get_source_variable(self, source_id, variable):
""" Get the current value of a source variable. If the variable is not
in the cache it will be retrieved from the controller. """
source_id = int(source_id)
try:
return self._retrieve_cached_source_variable(
source_id, variable)
except UncachedVariable:
return (await self._send_cmd("GET S[%d].%s" % (
source_id, variable)))
def get_cached_source_variable(self, source_id, variable, default=None):
""" Get the cached value of a source variable. If the variable is not
cached return the default value. """
source_id = int(source_id)
try:
return self._retrieve_cached_source_variable(
source_id, variable)
except UncachedVariable:
return default
async def watch_source(self, source_id):
""" Add a souce to the watchlist. """
source_id = int(source_id)
r = await self._send_cmd(
"WATCH S[%d] ON" % (source_id, ))
self._watched_source.add(source_id)
return r
async def unwatch_source(self, source_id):
""" Remove a souce from the watchlist. """
source_id = int(source_id)
self._watched_sources.remove(source_id)
return (await
self._send_cmd("WATCH S[%d] OFF" % (
source_id, )))
async def enumerate_sources(self):
""" Return a list of (source_id, source_name) tuples """
sources = []
for source_id in range(1, 17):
try:
name = await self.get_source_variable(source_id, 'name')
if name:
sources.append((source_id, name))
except CommandException:
break
return sources | /russound_rio-1.0.0.tar.gz/russound_rio-1.0.0/russound_rio/rio.py | 0.574514 | 0.159348 | rio.py | pypi |
import os
from .game_config import GameConfig
class Result:
    """
    Outcome of a single match between two bots.

    Can be constructed either from a GameConfig (local run) or from a
    match-config object carrying an id and bot descriptors, and is filled
    in later by parse_result().
    """

    def __init__(self, match_config, cfg=None):
        # cfg is needed by parse_result() to build a fallback replay path,
        # so store it for both construction paths. (Bug fix: it was only
        # assigned in the non-GameConfig branch, which made parse_result
        # raise AttributeError for GameConfig-based results.)
        self._config = cfg
        # Default first; the GameConfig branch below may override it.
        # (Bug fix: this used to be assigned None unconditionally *after*
        # the branch, silently discarding match_config.replay_name.)
        self.replay_path = None
        if isinstance(match_config, GameConfig):
            self.match_id = None
            self.bot1 = match_config.player1
            self.bot2 = match_config.player2
            self.map = match_config.map_name
            self.replay_path = match_config.replay_name
        else:
            self.match_id = match_config.id
            self.bot1 = match_config.bot1.name
            self.bot2 = match_config.bot2.name
            self.map = match_config.map_name
        self.result = None
        self.game_time = 0
        self.game_time_formatted = None
        self.time_stamp = None
        self.bot1_avg_frame = 0
        self.bot2_avg_frame = 0
        self.winner = None
        self.bot1_tags = None
        self.bot2_tags = None

    def __repr__(self):
        return f"""
        Result={self.result}
        Winner={self.winner}
        GameTime={self.game_time}
        Bot1AvgStepTime={self.bot1_avg_frame}
        Bot2AvgStepTime={self.bot2_avg_frame}
        """

    def to_json(self):
        """
        Convert Result object to a JSON-serializable dict.
        """
        return {
            "MatchID": self.match_id,
            "Bot1": self.bot1,
            "Bot2": self.bot2,
            "Winner": self.winner,
            "Map": self.map,
            "Result": self.result if self.result else "Error",
            "GameTime": self.game_time,
            "GameTimeFormatted": self.game_time_formatted,
            "TimeStamp": self.time_stamp,
            "Bot1AvgFrame": self.bot1_avg_frame,
            "Bot2AvgFrame": self.bot2_avg_frame,
            'ReplayPath': self.replay_path,
            'Bot1Tags': self.bot1_tags,
            'Bot2Tags': self.bot2_tags
        }

    def has_result(self):
        """
        Checks if there is a result already.
        """
        return self.result is not None

    def parse_result(self, result=None, error=False):
        """
        Parse a raw result payload from the game runner into this object.

        Either ``result`` (a dict) or ``error=True`` must be supplied;
        otherwise NameError is raised.
        """
        if not result and not error:
            raise NameError("Result.parse_result requires either a result or an error boolean")
        if error:
            result = {"Result": "Error"}
        if result.get("Result", None):
            temp_results = result['Result']
            # A bare "Error" marker or an SC2 crash aborts parsing early.
            if temp_results == "Error":
                self.result = "Error"
                return
            if temp_results[self.bot1] == "SC2Crash" or temp_results[self.bot2] == "SC2Crash":
                self.result = "Error"
                return
            elif temp_results[self.bot1] == "Crash":
                self.result = "Player1Crash"
                self.winner = self.bot2
            elif temp_results[self.bot2] == "Crash":
                self.result = "Player2Crash"
                self.winner = self.bot1
            elif temp_results[self.bot1] == "Timeout":
                self.result = "Player1TimeOut"
                self.winner = self.bot2
            elif temp_results[self.bot2] == "Timeout":
                self.result = "Player2TimeOut"
                self.winner = self.bot1
            elif temp_results[self.bot1] == "Victory":
                self.result = "Player1Win"
                self.winner = self.bot1
            elif temp_results[self.bot1] == "Defeat":
                self.result = "Player2Win"
                self.winner = self.bot2
            elif temp_results[self.bot1] == "Tie":
                self.result = "Tie"
                self.winner = "Tie"
            elif temp_results[self.bot2] == "Tie":
                self.result = "Tie"
                self.winner = "Tie"
            elif temp_results[self.bot1] == 'InitializationError':
                self.result = "InitializationError"
            elif temp_results[self.bot2] == 'InitializationError':
                self.result = "InitializationError"
        if result.get("GameTime", None):
            self.game_time = result["GameTime"]
            self.game_time_formatted = result["GameTimeFormatted"]
        if result.get("AverageFrameTime", None):
            self.bot1_avg_frame = result['AverageFrameTime'].get(self.bot1, 0)
            self.bot2_avg_frame = result['AverageFrameTime'].get(self.bot2, 0)
        if result.get("TimeStamp", None):
            self.time_stamp = result["TimeStamp"]
        if result.get("Tags", None):
            self.bot1_tags = result['Tags'].get(self.bot1, [])
            self.bot2_tags = result['Tags'].get(self.bot2, [])
        # Fall back to a conventional replay path when none was supplied.
        if self.replay_path is None:
            self.replay_path = os.path.join(
                self._config.REPLAYS_DIRECTORY,
                f'{self.match_id}_{self.bot1}_vs_{self.bot2}.SC2Replay')
from __future__ import annotations
from typing import Callable, List, Dict, Optional, Any, Tuple
from dataclasses import dataclass, field, asdict
from wit_gen import FixedGenContext, StepInstance
from expr import Expr
from util import uuid
from query import Queriable
# pub struct Circuit<F, TraceArgs> {
# pub step_types: HashMap<u32, Rc<ASTStepType<F>>>,
# pub forward_signals: Vec<ForwardSignal>,
# pub shared_signals: Vec<SharedSignal>,
# pub fixed_signals: Vec<FixedSignal>,
# pub halo2_advice: Vec<ImportedHalo2Advice>,
# pub halo2_fixed: Vec<ImportedHalo2Fixed>,
# pub exposed: Vec<ForwardSignal>,
# pub annotations: HashMap<u32, String>,
# pub trace: Option<Rc<Trace<F, TraceArgs>>>,
# pub fixed_gen: Option<Rc<FixedGen<F>>>,
# pub first_step: Option<ASTStepTypeUUID>,
# pub last_step: Option<ASTStepTypeUUID>,
# pub num_steps: usize,
# }
@dataclass
class ASTCircuit:
    """
    Root AST node for a chiquito circuit: holds the registered step types,
    the circuit-level signals, exposed outputs, annotations and the
    trace/fixed-generation hooks. Mirrors the Rust ``Circuit`` struct.
    """

    step_types: Dict[int, ASTStepType] = field(default_factory=dict)
    forward_signals: List[ForwardSignal] = field(default_factory=list)
    shared_signals: List[SharedSignal] = field(default_factory=list)
    fixed_signals: List[FixedSignal] = field(default_factory=list)
    exposed: List[Tuple[Queriable, ExposeOffset]] = field(default_factory=list)
    annotations: Dict[int, str] = field(default_factory=dict)
    fixed_gen: Optional[Callable] = None
    first_step: Optional[int] = None
    last_step: Optional[int] = None
    num_steps: int = 0
    q_enable: bool = True
    # Bug fix: `id: int = uuid()` evaluated uuid() ONCE at class-definition
    # time, so every ASTCircuit instance shared the same id. A default
    # factory generates a fresh id per instance.
    id: int = field(default_factory=uuid)

    def __str__(self: ASTCircuit):
        step_types_str = (
            "\n\t\t"
            + ",\n\t\t".join(f"{k}: {v}" for k, v in self.step_types.items())
            + "\n\t"
            if self.step_types
            else ""
        )
        forward_signals_str = (
            "\n\t\t" + ",\n\t\t".join(str(fs) for fs in self.forward_signals) + "\n\t"
            if self.forward_signals
            else ""
        )
        shared_signals_str = (
            "\n\t\t" + ",\n\t\t".join(str(ss) for ss in self.shared_signals) + "\n\t"
            if self.shared_signals
            else ""
        )
        fixed_signals_str = (
            "\n\t\t" + ",\n\t\t".join(str(fs) for fs in self.fixed_signals) + "\n\t"
            if self.fixed_signals
            else ""
        )
        exposed_str = (
            "\n\t\t"
            + ",\n\t\t".join(f"({str(lhs)}, {str(rhs)})" for (lhs, rhs) in self.exposed)
            + "\n\t"
            if self.exposed
            else ""
        )
        annotations_str = (
            "\n\t\t"
            + ",\n\t\t".join(f"{k}: {v}" for k, v in self.annotations.items())
            + "\n\t"
            if self.annotations
            else ""
        )
        return (
            f"ASTCircuit(\n"
            f"\tstep_types={{{step_types_str}}},\n"
            f"\tforward_signals=[{forward_signals_str}],\n"
            f"\tshared_signals=[{shared_signals_str}],\n"
            f"\tfixed_signals=[{fixed_signals_str}],\n"
            f"\texposed=[{exposed_str}],\n"
            f"\tannotations={{{annotations_str}}},\n"
            f"\tfixed_gen={self.fixed_gen},\n"
            f"\tfirst_step={self.first_step},\n"
            f"\tlast_step={self.last_step},\n"
            f"\tnum_steps={self.num_steps}\n"
            f"\tq_enable={self.q_enable}\n"
            f")"
        )

    def __json__(self: ASTCircuit):
        """Serialize the circuit to a JSON-compatible dict."""
        return {
            "step_types": {k: v.__json__() for k, v in self.step_types.items()},
            "forward_signals": [x.__json__() for x in self.forward_signals],
            "shared_signals": [x.__json__() for x in self.shared_signals],
            "fixed_signals": [x.__json__() for x in self.fixed_signals],
            "exposed": [
                [queriable.__json__(), offset.__json__()]
                for (queriable, offset) in self.exposed
            ],
            "annotations": self.annotations,
            "first_step": self.first_step,
            "last_step": self.last_step,
            "num_steps": self.num_steps,
            "q_enable": self.q_enable,
            "id": self.id,
        }

    def add_forward(self: ASTCircuit, name: str, phase: int) -> ForwardSignal:
        """Create, register and annotate a new forward signal."""
        signal = ForwardSignal(phase, name)
        self.forward_signals.append(signal)
        self.annotations[signal.id] = name
        return signal

    def add_shared(self: ASTCircuit, name: str, phase: int) -> SharedSignal:
        """Create, register and annotate a new shared signal."""
        signal = SharedSignal(phase, name)
        self.shared_signals.append(signal)
        self.annotations[signal.id] = name
        return signal

    def add_fixed(self: ASTCircuit, name: str) -> FixedSignal:
        """Create, register and annotate a new fixed signal."""
        signal = FixedSignal(name)
        self.fixed_signals.append(signal)
        self.annotations[signal.id] = name
        return signal

    def expose(self: ASTCircuit, signal: Queriable, offset: ExposeOffset):
        """Mark a signal at the given row offset as a public output."""
        self.exposed.append((signal, offset))

    def add_step_type(self: ASTCircuit, step_type: ASTStepType, name: str):
        """Register a step type under its id and record its annotation."""
        self.annotations[step_type.id] = name
        self.step_types[step_type.id] = step_type

    def set_fixed_gen(self, fixed_gen_def: Callable[[FixedGenContext], None]):
        """Install the fixed-column generator; at most one is allowed."""
        if self.fixed_gen is not None:
            raise Exception("ASTCircuit cannot have more than one fixed generator.")
        else:
            self.fixed_gen = fixed_gen_def

    def get_step_type(self, uuid: int) -> ASTStepType:
        """Return the step type registered under the given UUID.

        Raises ValueError when no step type with that UUID exists.
        (The parameter name shadows util.uuid locally; kept unchanged for
        interface compatibility.)
        """
        if uuid in self.step_types:
            return self.step_types[uuid]
        else:
            raise ValueError("ASTStepType not found.")
# pub struct StepType<F> {
# id: StepTypeUUID,
# pub name: String,
# pub signals: Vec<InternalSignal>,
# pub constraints: Vec<Constraint<F>>,
# pub transition_constraints: Vec<TransitionConstraint<F>>,
# pub lookups: Vec<Lookup<F>>,
# pub annotations: HashMap<u32, String>,
# }
@dataclass
class ASTStepType:
    """
    A step type: a reusable bundle of internal signals and constraints
    that is instantiated many times as step instances of a circuit.
    Mirrors the Rust ``StepType`` struct.
    """

    id: int
    name: str
    signals: List[InternalSignal]
    constraints: List[ASTConstraint]
    transition_constraints: List[TransitionConstraint]
    annotations: Dict[int, str]

    @staticmethod
    def new(name: str) -> ASTStepType:
        """Create an empty step type with a freshly generated UUID.

        Fix: declared as @staticmethod — the factory takes no self/cls,
        and without the decorator calling it on an *instance* would
        misbind the instance as ``name``.
        """
        return ASTStepType(uuid(), name, [], [], [], {})

    def __str__(self):
        signals_str = (
            "\n\t\t\t\t"
            + ",\n\t\t\t\t".join(str(signal) for signal in self.signals)
            + "\n\t\t\t"
            if self.signals
            else ""
        )
        constraints_str = (
            "\n\t\t\t\t"
            + ",\n\t\t\t\t".join(str(constraint) for constraint in self.constraints)
            + "\n\t\t\t"
            if self.constraints
            else ""
        )
        transition_constraints_str = (
            "\n\t\t\t\t"
            + ",\n\t\t\t\t".join(str(tc) for tc in self.transition_constraints)
            + "\n\t\t\t"
            if self.transition_constraints
            else ""
        )
        annotations_str = (
            "\n\t\t\t\t"
            + ",\n\t\t\t\t".join(f"{k}: {v}" for k, v in self.annotations.items())
            + "\n\t\t\t"
            if self.annotations
            else ""
        )
        return (
            f"ASTStepType(\n"
            f"\t\t\tid={self.id},\n"
            f"\t\t\tname='{self.name}',\n"
            f"\t\t\tsignals=[{signals_str}],\n"
            f"\t\t\tconstraints=[{constraints_str}],\n"
            f"\t\t\ttransition_constraints=[{transition_constraints_str}],\n"
            f"\t\t\tannotations={{{annotations_str}}}\n"
            f"\t\t)"
        )

    def __json__(self):
        """Serialize the step type to a JSON-compatible dict."""
        return {
            "id": self.id,
            "name": self.name,
            "signals": [x.__json__() for x in self.signals],
            "constraints": [x.__json__() for x in self.constraints],
            "transition_constraints": [
                x.__json__() for x in self.transition_constraints
            ],
            "annotations": self.annotations,
        }

    def add_signal(self: ASTStepType, name: str) -> InternalSignal:
        """Create, register and annotate a new internal signal."""
        signal = InternalSignal(name)
        self.signals.append(signal)
        self.annotations[signal.id] = name
        return signal

    def add_constr(self: ASTStepType, annotation: str, expr: Expr):
        """Add an in-step constraint built from the given expression."""
        condition = ASTConstraint(annotation, expr)
        self.constraints.append(condition)

    def add_transition(self: ASTStepType, annotation: str, expr: Expr):
        """Add a transition constraint spanning adjacent step instances."""
        condition = TransitionConstraint(annotation, expr)
        self.transition_constraints.append(condition)

    def __eq__(self: ASTStepType, other: ASTStepType) -> bool:
        # Identity is defined purely by id; non-ASTStepType compares unequal.
        if isinstance(self, ASTStepType) and isinstance(other, ASTStepType):
            return self.id == other.id
        return False

    def __req__(other: ASTStepType, self: ASTStepType) -> bool:
        # NOTE(review): not a Python protocol method (reflected equality is
        # __eq__ again); kept unchanged for any callers invoking it directly.
        return ASTStepType.__eq__(self, other)

    def __hash__(self: ASTStepType):
        # Must accompany the custom __eq__ so instances stay hashable.
        return hash(self.id)
@dataclass
class ASTConstraint:
    """An annotated constraint expression applied within a single step."""

    annotation: str
    expr: Expr

    def __str__(self: ASTConstraint):
        return (
            f"Constraint(\n\t\t\t\t\tannotation='{self.annotation}',\n"
            f"\t\t\t\t\texpr={self.expr}\n\t\t\t\t)"
        )

    def __json__(self: ASTConstraint):
        return {"annotation": self.annotation, "expr": self.expr.__json__()}
@dataclass
class TransitionConstraint:
    """An annotated constraint relating a step to the next step instance."""

    annotation: str
    expr: Expr

    def __str__(self: TransitionConstraint):
        return "TransitionConstraint({})".format(self.annotation)

    def __json__(self: TransitionConstraint):
        return {"annotation": self.annotation, "expr": self.expr.__json__()}
@dataclass
class ForwardSignal:
    """A circuit-level signal whose value is carried forward across steps."""

    id: int
    phase: int
    annotation: str

    def __init__(self: ForwardSignal, phase: int, annotation: str):
        # ids come from the global uuid counter, not from the caller.
        self.id: int = uuid()
        self.phase = phase
        self.annotation = annotation

    def __str__(self: ForwardSignal):
        return (
            f"ForwardSignal(id={self.id}, phase={self.phase}, "
            f"annotation='{self.annotation}')"
        )

    def __json__(self: ForwardSignal):
        return asdict(self)
@dataclass
class SharedSignal:
    """A circuit-level signal shared between adjacent step instances."""

    id: int
    phase: int
    annotation: str

    def __init__(self: SharedSignal, phase: int, annotation: str):
        # ids come from the global uuid counter, not from the caller.
        self.id: int = uuid()
        self.phase = phase
        self.annotation = annotation

    def __str__(self: SharedSignal):
        return (
            f"SharedSignal(id={self.id}, phase={self.phase}, "
            f"annotation='{self.annotation}')"
        )

    def __json__(self: SharedSignal):
        return asdict(self)
class ExposeOffset:
    """Base marker for the row offset at which a signal is exposed."""


class First(ExposeOffset):
    """Expose the signal's value at the first step instance."""

    def __str__(self: First):
        return "First"

    def __json__(self: First):
        return {"First": 0}


class Last(ExposeOffset):
    """Expose the signal's value at the last step instance."""

    def __str__(self: Last):
        return "Last"

    def __json__(self: Last):
        return {"Last": -1}


@dataclass
class Step(ExposeOffset):
    """Expose the signal's value at a specific step offset."""

    offset: int

    def __str__(self: Step):
        return f"Step({self.offset})"

    def __json__(self: Step):
        return {"Step": self.offset}
@dataclass
class FixedSignal:
    """A circuit-level signal with values precomputed by the fixed generator."""

    id: int
    annotation: str

    def __init__(self: FixedSignal, annotation: str):
        # ids come from the global uuid counter, not from the caller.
        self.id: int = uuid()
        self.annotation = annotation

    def __str__(self: FixedSignal):
        return (
            f"FixedSignal(id={self.id}, "
            f"annotation='{self.annotation}')"
        )

    def __json__(self: FixedSignal):
        return asdict(self)
@dataclass
class InternalSignal:
    """A signal scoped to a single step type (not visible across steps)."""

    id: int
    annotation: str

    def __init__(self: InternalSignal, annotation: str):
        # ids come from the global uuid counter, not from the caller.
        self.id = uuid()
        self.annotation = annotation

    def __str__(self: InternalSignal):
        return (
            f"InternalSignal(id={self.id}, "
            f"annotation='{self.annotation}')"
        )

    def __json__(self: InternalSignal):
        return asdict(self)
# Part 3: Fibonacci Example
The best learning is by doing. In this chapter, we will walk through the [fibonacci.py](https://github.com/qwang98/PyChiquito/blob/main/pychiquito/fibonacci.py) example.
# Chapter 1: Fibonacci and Chiquito Concepts
The Fibonacci series is an infinite series, starting from "1" and "1", in which every number in the series is the sum of two numbers preceding it in the series. The first few rounds for the Fibonacci series are:
- 1 + 1 = 2
- 1 + 2 = 3
- 2 + 3 = 5
- 3 + 5 = 8
Therefore, to express these mathematical equations, we need three variables "a", "b", and "c", which we call "signals" in Chiquito. Because zero knowledge proofs typically express mathematical equations in the matrix form, we construct the following table:
||Signals||
| :-: | :-: | :-: |
| a | b | c |
| 1 | 1 | 2 |
| 1 | 2 | 3 |
| 2 | 3 | 5 |
| 3 | 5 | 8 |
|| ... ||
Besides assigning values to these signals, we also need to define the mathematical relationships among them, which we call "constraints" in Chiquito. For each row:
- a + b == c
- b == a in the next row
- c == b in the next row
||Signals|||Setups||
| :-: | :-: | :-: | :-: | :-: | :-: |
| a | b | c | constraint 1 | constraint 2 | constraint 3 |
| 1 | 1 | 2 | a + b == c | b == a.next | c == b.next |
| 1 | 2 | 3 | a + b == c | b == a.next | c == b.next |
| 2 | 3 | 5 | a + b == c | b == a.next | c == b.next |
| 3 | 5 | 8 | a + b == c | b == a.next | c == b.next |
|| ... ||| ... ||
Chiquito is a step-based language for constructing zero knowledge circuits, which means that instead of expressing all signals and constraints above as one entirety, we separate them into smaller chunks, called "step instances". In this example, each row, together with its signals and constraints, are collectively called a "step instance". We add indices for these step instances to the table, starting from 0:
| Step Instance Index || Signals ||| Setups ||
| :-: | :-: | :-: | :-: | :-: | :-: | :-: |
|| a | b | c | constraint 1 | constraint 2 | constraint 3 |
| 0 | 1 | 1 | 2 | a + b == c | b == a.next | c == b.next |
| 1 | 1 | 2 | 3 | a + b == c | b == a.next | c == b.next |
| 2 | 2 | 3 | 5 | a + b == c | b == a.next | c == b.next |
| 3 | 3 | 5 | 8 | a + b == c | b == a.next | c == b.next |
| ... || ... ||| ... ||
Note that these 4 step instances share the same signals and constraints, although not the same value assignments for signals. They are essentially different instantiations of the same signals and constraints, or different "step instances" of the same "step type". In Chiquito, we define signals and constraints in "step types" and we generate "witness assignments" for signal values. "Step types" with "witness assignments" become "step instances". In the example above, we have 4 step instances but only 1 step type, defined as the same 3 signals and 3 constraints. For simplicity, we call this single step type "fibo step", also added to the table below:
| Step Type | Step Instance Index || Signals ||| Setups ||
| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
||| a | b | c | constraint 1 | constraint 2 | constraint 3 |
| fibo step | 0 | 1 | 1 | 2 | a + b == c | b == a.next | c == b.next |
| fibo step | 1 | 1 | 2 | 3 | a + b == c | b == a.next | c == b.next |
| fibo step | 2 | 2 | 3 | 5 | a + b == c | b == a.next | c == b.next |
| fibo step | 3 | 3 | 5 | 8 | a + b == c | b == a.next | c == b.next |
| ... | ... || ... ||| ... ||
As a recap, to construct a Fibonacci circuit, we need three signals: "a", "b", and "c". In Chiquito, a "circuit" is a set of constraints that the signals of the circuit need to satisfy. Each Chiquito circuit is composed of multiple "step instances", which are instantiations of "step types". Each "step type" contains a subset of signals and constraints that these signals need to satisfy.
| /rust_chiquito-0.1.0.tar.gz/rust_chiquito-0.1.0/pychiquito/tutorial_pt3_ch1.ipynb | 0.809163 | 0.97711 | tutorial_pt3_ch1.ipynb | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.