code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from typing import Callable
import requests
from .common import common_client, GET, logger
def get_pipedrive_client(api_token:str, domain:str) -> Callable:
    """Returns a callable you can use to interact with your instance of Pipedrive.
    You'll need your [personal API token](https://pipedrive.readme.io/docs/how-to-find-the-api-token)
    and the [company domain](https://pipedrive.readme.io/docs/how-to-get-the-company-domain).
    :param api_token: your personal API token
    :param domain: your company domain
    """
    base_url = f"https://{domain}.pipedrive.com/api/v1"
    def pipedrive_client(method, path="/", parameters=None, url=None, data=None, resource=None):
        """REST tool to interact with the Pipedrive API.
        :param method: one of the HTTP verb (GET, POST, DELETE,...)
        :param path: the path of the resource (eg.: `/deals`)
        :param parameters: the optional query parameters that will be encoded in the querystring.
        :param url: if specified, path and parameters will be ignored (this should be a full URL, eg. `https://example.pipedrive.com/api/v1/deals`)
        :param data: the optional body content of a POST/PATCH/PUT request. It will be encoded as JSON.
        :param resource: if truthy & method is GET, the client will request all the paginated content (it will make 1+ requests as needed).
        """
        # Pipedrive authenticates via an `api_token` query parameter on every call.
        _params = {**(parameters or {}), 'api_token': api_token}
        headers = {'Content-Type': 'application/json'}
        if method.lower() == GET and resource:
            # Reference for pagination:
            # https://pipedrive.readme.io/docs/core-api-concepts-pagination
            has_more_items = True
            requested_start = 0
            resources = []
            while has_more_items:
                # _params always contains api_token, so it is never empty.
                paged_parameters = dict(_params)
                if requested_start != 0:
                    paged_parameters['start'] = requested_start
                result = common_client(GET, base_url, path=path,
                                       parameters=paged_parameters, headers=headers)
                try:
                    has_more_items = result['additional_data']['pagination']['more_items_in_collection']
                except KeyError:
                    has_more_items = False
                # BUGFIX: Pipedrive returns `"data": null` (not an empty list) when
                # a page has no items; guard before calling len()/extend().
                current_resources = result['data'] or []
                requested_start += len(current_resources)
                resources.extend(current_resources)
            return resources
        else:
            return common_client(method, base_url,
                                 path=path, parameters=_params, data=data, url=url, headers=headers)
    return pipedrive_client
from .common import common_client, GET
def get_livestorm_client(apikey, base_url="https://api.livestorm.co/v1"):
    """Returns a callable you can use to interact with Livestorm API.
    :param apikey: A token from the Account Settings > Integrations page
    (see: https://developers.livestorm.co/docs/authorization)
    :param base_url: the default should be changed only if asked from Livestorm
    """
    headers = {
        "accept": "application/vnd.api+json",
        "Authorization": apikey,
    }
    def livestorm_client(method, path, parameters=None, url=None, data=None, resource=False):
        """REST tool to interact with Livestorm API.
        The path parameters should always start with a "/" and should NOT include the version (eg: `/events`)
        :param method: one of the HTTP verb (GET, POST, DELETE,...)
        :param path: the path of the resource (eg.: `/events`)
        :param parameters: the optional query parameters that will be encoded in the querystring.
        :param url: if specified, path and parameters will be ignored (this should be a full URL, eg. `https://api.livestorm.co/v1`)
        :param data: the optional body content of a POST/PATCH/PUT request. It will be encoded as JSON.
        :param resource: if true & method is GET, the client will request all the paginated content (it will make 1+ requests as needed).
        """
        if method.lower() == GET and resource:
            # Paginated fetch: page 0 is implicit on the first request, then
            # `page[number]` is sent explicitly until `meta.page_count` is reached.
            results = []
            page_index = 0
            while True:
                page_params = {**parameters} if parameters else {}
                if page_index:
                    page_params['page[number]'] = page_index
                result = common_client(GET, base_url, path=path, parameters=page_params,
                                       headers=headers)
                try:
                    total_pages = result['meta']['page_count']
                except KeyError:
                    # No pagination metadata: treat the answer as a single page.
                    total_pages = 1
                page_index += 1
                results.extend(result['data'])
                if total_pages <= page_index:
                    break
        else:
            response = common_client(method, base_url, path, parameters, url, headers, data)
            # Unwrap the JSON:API envelope when a body was returned at all.
            results = response['data'] if response else response
        return results
    return livestorm_client
from rest_tools.common import common_client
# FusionAuth's documented default page size for search endpoints.
DEFAULT_NUMBER_OF_RESULTS = 25
def get_fusionauth_client(api_key:str, base_url:str, number_of_results:int=DEFAULT_NUMBER_OF_RESULTS):
    """Returns a callable you can use to interact with Fusionauth API.
    :param api_key: the api key, see: https://fusionauth.io/docs/v1/tech/apis/authentication/#api-key-authentication
    :param base_url: the url of your Fusionauth instance
    :param number_of_results: page size used while walking paginated resources
    """
    headers = {'Authorization': api_key}
    def fusionauth_client(method, path="/", parameters=None, url=None, data=None, resource=None):
        """REST tool to interact with Fusionauth API.
        The path parameters should always start with a "/" and should include the word "api" (eg: `/api/user`)
        :param method: one of the HTTP verb (GET, POST, DELETE,...)
        :param path: the path of the resource (eg.: `/api/user`)
        :param parameters: the optional query parameters that will be encoded in the querystring.
        :param url: if specified, path and parameters will be ignored (this should be a full URL, eg. `https://fusionauth.example.com/api/user`)
        :param data: the optional body content of a POST/PATCH/PUT request. It will be encoded as JSON.
        :param resource: if this is a string & method is GET, the client will request all the paginated content (it will make 1+ requests as needed).
        """
        # BUGFIX: compare case-insensitively, consistent with the other clients
        # in this package; the previous `method == "get"` silently skipped
        # pagination when callers passed "GET".
        if method.lower() == "get" and resource:
            start_row = 0
            resources = []
            while True:
                paged_parameters = dict(parameters) if parameters else {}
                if start_row != 0:
                    paged_parameters['startRow'] = start_row
                if number_of_results != DEFAULT_NUMBER_OF_RESULTS:
                    paged_parameters['numberOfResults'] = number_of_results
                result = common_client(method, base_url, path=path,
                                       parameters=paged_parameters, headers=headers)
                if result['total'] == 0:
                    break
                batch = result[resource]
                resources.extend(batch)
                if not batch or len(resources) >= result['total']:
                    # Empty page (defensive, avoids an infinite loop) or
                    # everything collected: stop requesting.
                    break
                # BUGFIX: advance by the number of rows actually received, not by
                # the requested page size, so a short page cannot skip records.
                start_row += len(batch)
            return resources
        return common_client(method, base_url, path=path, parameters=parameters, data=data, url=url,
                             headers=headers)
    return fusionauth_client
import json
from re import compile
from typing import Callable
from .common import get_complete_url, get_response, common_client
def get_canvas_client(access_token:str, base_url:str) -> Callable:
    """Returns a callable you can use to interact with Canvas API.
    :param base_url: Your Canvas canonical URL (e.g. https://your-institution.instructure.com)
    :param access_token: A personal access_token from your user profile page
    (see: https://canvas.instructure.com/doc/api/file.oauth.html#manual-token-generation)
    """
    headers = {'Authorization': f'Bearer {access_token}'}
    # Extracts (url, rel) pairs from an RFC 5988 Link header,
    # e.g. <https://...page=2>; rel="next"
    link_pattern = compile(r"<(.*?)>; rel=\"(\w+)\"")
    def canvas_client(method, path="/", parameters=None, url=None, data=None, resource=False):
        """REST tool to interact with Canvas API.
        The path parameters should always start with a "/" and should include the version (eg: `/api/v1/accounts`)
        :param method: one of the HTTP verb (GET, POST, DELETE,...)
        :param path: the path of the resource (eg.: `/api/v1/accounts`)
        :param parameters: the optional query parameters that will be encoded in the querystring.
        :param url: if specified, path and parameters will be ignored (this should be a full URL, eg. `https://example.instructure.com/api/v1/accounts`)
        :param data: the optional body content of a POST/PATCH/PUT request. It will be form-encoded.
        :param resource: if true & method is GET, the client will request all the paginated content (it will make 1+ requests as needed).
        """
        if method.lower() == "get" and resource:
            # Canvas paginates via the Link response header: follow rel="next"
            # until it disappears, accumulating every page's JSON array.
            collected = []
            next_url = get_complete_url(base_url, path, parameters=parameters, url=url)
            while next_url:
                response = get_response(method, next_url, headers=headers, data=data)
                rels = {rel: link for link, rel
                        in link_pattern.findall(response.headers.get('link', ''))}
                body = response.text
                if body:
                    collected.extend(json.loads(body))
                next_url = rels.get('next')
            return collected
        return common_client(method, base_url, path=path, parameters=parameters, url=url,
                             headers=headers, form_data=data)
    return canvas_client
from operator import itemgetter
from typing import Callable
import requests
from .common import common_client, expiring, GET, logger
@expiring(itemgetter('exp'))
def get_wordpress_access_token(base_url, api_key, api_secret):
    """Fetch a JWT access token from the Wordpress `/wp/v2/token` endpoint.

    Decorated with ``expiring(itemgetter('exp'))`` — presumably the result is
    reused until its ``exp`` claim lapses (see rest_tools.common; confirm there).

    :param base_url: the WP installation path, including `/wp-json`
    :param api_key: key pair, key
    :param api_secret: key pair, secret
    """
    payload = {'api_key': api_key, 'api_secret': api_secret}
    response = requests.post(f"{base_url}/wp/v2/token", data=payload)
    try:
        response.raise_for_status()
    except requests.HTTPError as exc:
        # Log the server's body before propagating: WP puts the reason there.
        logger.error("WP Error (%s): %s", exc, response.text)
        raise
    return response.json()
def get_wordpress_client(api_key:str, api_secret:str, base_url:str) -> Callable:
    """Returns a callable you can use to interact with Wordpress API.
    :param api_key: Key pair, key
    :param api_secret: Key pair, secret. See https://github.com/WP-API/jwt-auth#generate-key-pairs
    :param base_url: The installation path of your WP installation; please include `/wp-json` at the end.
    """
    def wordpress_client(method, path="/", parameters=None, url=None, data=None, file_object=None, resource=None):
        """REST tool to interact with Wordpress API.
        :param method: one of the HTTP verb (GET, POST, DELETE,...)
        :param path: the path of the resource (eg.: `/wp/v2/post`)
        :param parameters: the optional query parameters that will be encoded in the querystring.
        :param url: if specified, path and parameters will be ignored (this should be a full URL, eg. `https://www.example.com/wp-json/wp/v2/post`)
        :param data: the optional body content of a POST/PATCH/PUT request. It will be encoded as JSON.
        :param file_object: an open file-like object that will be uploaded.
        :param resource: if this is a string & method is GET, the client will request all the paginated content (it will make 1+ requests as needed).
        """
        # Token is (re)fetched on every call; get_wordpress_access_token is
        # wrapped in `expiring`, so this is cheap while the token is valid.
        token = get_wordpress_access_token(base_url, api_key, api_secret)
        headers = {'Authorization': "Bearer {access_token}".format(access_token=token['access_token'])}
        if method.lower() == GET and resource:
            has_more_items = True
            current_page = 1
            resources = []
            while has_more_items:
                paged_parameters = dict(parameters) if parameters else {}
                if current_page != 1:
                    paged_parameters['page'] = current_page
                result = common_client(GET, base_url, path=path, parameters=paged_parameters, headers=headers)
                try:
                    total_pages = result['total_pages']
                except KeyError:
                    total_pages = 1
                current_page += 1
                # BUGFIX: use >= so the final page is fetched too. The previous
                # strict comparison (total_pages > current_page, evaluated after
                # the increment) never requested the last page of a multi-page
                # collection.
                has_more_items = total_pages >= current_page
                resources.extend(result[resource])
            return resources
        elif file_object:
            # Uploads go through multipart form encoding instead of JSON.
            return common_client(method, base_url, path=path, parameters=parameters, url=url,
                                 headers=headers, form_data=data, files={'file': file_object})
        else:
            return common_client(method, base_url, path=path, parameters=parameters, data=data, url=url,
                                 headers=headers)
    return wordpress_client
"""Console script for rest_uploader."""
import sys
import click
import tempfile
from .rest_uploader import (
watcher,
set_autotag,
set_notebook_id,
set_working_directory,
set_endpoint,
set_token,
set_language,
set_autorotation,
set_moveto,
)
from . import __version__
def parse_argument(arg):
    """Normalize a loose yes/no command-line flag to exactly "yes" or "no".

    Any letter-case spelling of "no", "n" or "off" maps to "no"; every other
    value (including empty strings) is treated as "yes".
    """
    # BUGFIX: the original enumerated only some case variants ("No", "OFF",
    # ...) and therefore treated e.g. "Off" or "nO" as "yes"; compare
    # case-insensitively instead.
    return "no" if arg.lower() in {"no", "n", "off"} else "yes"
# Click wiring: one required PATH argument (the directory to watch) plus
# options mirroring rest_uploader's runtime settings (Joplin endpoint, OCR
# language, tagging, destination notebook, rotation, post-processing move).
@click.command()
@click.argument(
    "path",
    type=click.Path(
        exists=True,
        file_okay=False,
        dir_okay=True,
        writable=False,
        readable=True,
        resolve_path=True,
    ),
)
@click.option(
    "-s",
    "--server",
    "server",
    default="127.0.0.1",
    help="""Specify the server to which the application"""
    """ should connect. Default = "127.0.0.1" """,
)
@click.option(
    "-p",
    "--port",
    "port",
    default="41184",
    help="""Specify the port to which the application should connect."""
    """ Default = 41184 """,
)
@click.option(
    "-l",
    "--language",
    "language",
    default="eng",
    help="""Specify OCR Language. Refer to Tesseract's documentation found here:
    https://github.com/tesseract-ocr/tesseract/wiki""",
)
@click.option(
    "-t",
    "--autotag",
    "autotag",
    default="yes",
    help="""Specify whether or not to automatically tag notes based on"""
    """ OCR'd text. Default = 'yes', specify 'no' if this behavior is"""
    """ not desired""",
)
@click.option(
    "-d",
    "--destination",
    "destination",
    default="inbox",
    help="""Specify the notebook in which to place newly created notes."""
    """ Specified notebook must exist or program will exit."""
    """ Default = "inbox". """,
)
@click.option(
    "-r",
    "--autorotation",
    "autorotation",
    default="yes",
    help="""Specify whether to rotate images."""
    """ Default = yes (autorotation on, specify 'no' to disable). """,
)
# NOTE(review): this option has no help text; its default is the system temp
# directory, and the target must be an existing directory.
@click.option(
    "-o",
    "--moveto",
    "moveto",
    default=tempfile.gettempdir(),
    type=click.Path(
        exists=True,
        file_okay=False,
        dir_okay=True,
        writable=False,
        readable=True,
        resolve_path=True,
    ),
)
@click.version_option(version=__version__)
def main(
    path=None,
    server="server",
    port="port",
    language="eng",
    autotag="yes",
    destination="inbox",
    autorotation="yes",
    moveto="",
):
    """Console script for rest_uploader.
    Define file path to monitor, e.g.
    rest_uploader /home/user/Documents/scans
    """
    click.echo("Launching Application " "rest_uploader.cli.main")
    # Configure rest_uploader's module-level state before starting the watcher.
    set_working_directory()
    set_endpoint(server, port)
    set_token()
    # set_temp_path() # Do I need to do this here?
    notebook_id = set_notebook_id(destination.strip())
    # "err" means the Joplin API could not be reached at all; an empty string
    # means the API answered but no notebook matched the requested name.
    if notebook_id == "err":
        click.echo("Joplin may not be running, please ensure it is open.")
        click.echo(" will check again when processing a file.")
    elif notebook_id == "":
        click.echo(f"Invalid Notebook, check to see if {destination.strip()} exists.")
        click.echo(f"Please specify a valid notebook. Quitting application.")
        return 0
    else:
        click.echo(f"Found Notebook ID: {notebook_id}")
    set_language(language)
    # Normalize loose yes/no spellings before handing them to the setters.
    autotag = parse_argument(autotag)
    set_autotag(parse_argument(autotag))
    autorotation = parse_argument(autorotation)
    set_autorotation(autorotation)
    moveto = set_moveto(moveto)
    click.echo("Language: " + language)
    click.echo("Automatically Tag Notes? " + autotag)
    click.echo("Destination Notebook: " + destination)
    click.echo("Autorotation: " + autorotation)
    if moveto == "":
        click.echo("Files will remain in the monitoring directory")
    else:
        click.echo("File move to location: " + moveto)
    # Blocks here, watching `path` until the process is interrupted.
    watcher(path=path)
    return 0
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
import datetime
import enum
import typing
import urllib.parse
import warnings
import requests
import xmltodict
# Optional speedup: use uvloop's event loop policy when it is installed.
try:
    import uvloop

    uvloop.install()
    import asyncio
except ImportError:  # no uvloop
    # BUGFIX: the original wrote `except ModuleNotFoundError or ImportError:`,
    # which evaluates the boolean expression first and thus caught only
    # ModuleNotFoundError. `ImportError` covers both (ModuleNotFoundError is
    # its subclass).
    import asyncio
# Optional dependency: the asynchronous client below is only available when
# aiohttp can be imported.
try:
    import aiohttp

    aiohttp_exists = True
except ImportError:
    aiohttp_exists = False
class VLC_State(enum.Enum):
    """Playback state as reported by VLC's status endpoint (`<state>` element)."""
    playing = "playing"
    paused = "paused"
    stopped = "stopped"
class VLC:
    """
    VLC manager class

    Thin blocking wrapper over VLC's HTTP (Lua) interface — the
    ``/requests/*.xml`` endpoints — using HTTP Basic Auth.
    """

    def __init__(
        self,
        url: str = "http://localhost:8080",
        auth: typing.Union[
            tuple, requests.auth.HTTPBasicAuth, list, set
        ] = requests.auth.HTTPBasicAuth("", ""),
    ) -> None:
        """
        VLC Class

        This class will initialize a VLC instance by connect to it using REST API w/ HTTP Basic Auth.
        This class is blocking.
        If you want to use asynchornous version please install
        `aiohttp <https://pypi.org/project/aiohttp/>`_

        :param url: VLC url
        :param auth: VLC auth (username/password pair or an HTTPBasicAuth)
        :raises ValueError: when a pair-like auth does not have exactly 2 items
        :raises Exception: when the VLC HTTP interface cannot be reached
        :return: None
        """
        self.url = url
        if isinstance(auth, (tuple, list, set)):
            if len(auth) != 2:
                raise ValueError(
                    "Auth must be a tuple or list of 2 elements which is username and password"
                )
            self.auth = requests.auth.HTTPBasicAuth(*auth)
        else:
            self.auth = auth
        if not self.connectable:
            raise Exception("VLC is not running or REST API is not enabled")
        self.full_screen = self.is_fullscreen
        # When True, the `volume` property reports percent (0-200) instead of
        # VLC's raw 0-512 scale.
        self.volume_percentage = False

    # ----- internal helpers ------------------------------------------------

    def __encode_uri(self, url: str) -> str:
        # Percent-encode the URI so it is safe inside a querystring.
        # (BUGFIX: the return annotation previously claimed bool.)
        return urllib.parse.quote(url)

    def _command(self, query: str) -> bool:
        """Send ``status.xml?command=<query>`` and report whether VLC replied 200."""
        return (
            requests.get(
                self.url + "/requests/status.xml?command=" + query, auth=self.auth
            ).status_code
            == 200
        )

    def _status_root(self) -> dict:
        """Fetch and parse ``status.xml``; return its top-level ``root`` element."""
        return self.status["root"]

    def __set_name__(self, owner, name):
        # NOTE(review): __set_name__ is a descriptor hook; it only fires when a
        # VLC instance is assigned as a class attribute — confirm it is needed.
        self.name = "_" + name

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails: warn instead of
        # raising AttributeError for not-yet-implemented endpoints.
        if name not in self.__dict__:
            warnings.warn(
                "Attribute '{}' is not defined in VLC class or not yet implemented".format(
                    name
                ),
                UserWarning,
            )
            # BUGFIX: a stray trailing comma made this return the 1-tuple
            # ``(None,)``; return plain None.
            return None
        return self.__dict__[name]

    # ----- status / playlist ----------------------------------------------

    @property
    def is_playing(self) -> bool:
        """
        Check if VLC is playing or not

        :return: bool
        """
        # BUGFIX: the original compared the Response *object* to 200 (always
        # False); derive the answer from the reported playback state instead.
        return self.state is VLC_State.playing

    @property
    def status(self) -> dict:
        """
        Show the status & configurations inform of a dictionaries

        :return: dict
        """
        return xmltodict.parse(
            requests.get(self.url + "/requests/status.xml", auth=self.auth).text,
        )

    @property
    def playlist(self) -> dict:
        """
        Show the playlist and configurations inform of a dictionaries

        :return: dict
        """
        return xmltodict.parse(
            requests.get(self.url + "/requests/playlist.xml", auth=self.auth).text
        )

    @property
    def connectable(self) -> bool:
        """
        Check if VLC REST API is running

        :return: bool
        """
        try:
            return (
                requests.get(
                    self.url + "/requests/status.xml", auth=self.auth
                ).status_code
                == 200
            )
        except requests.exceptions.ConnectionError:
            return False

    # ----- playback commands ----------------------------------------------

    def stop(self) -> bool:
        """
        Stop the current playing media and return back the boolean of the result

        :return: bool
        """
        return self._command("pl_stop")

    def clear_playlist(self) -> bool:
        """
        Clear the playlist and return back the boolean of the result

        :return: bool
        """
        return self._command("pl_empty")

    def play(self, uri: str) -> bool:
        """
        Play a media by uri and return back the boolean of the result if success or not

        :param uri: media uri
        :return: bool
        """
        return self._command("in_play&input=" + self.__encode_uri(uri))

    def append_queue(self, uri: str) -> bool:
        """
        Append a media to the queue and return back the boolean of the result if success or not

        :param uri: media uri
        :return: bool
        """
        return self._command("in_enqueue&input=" + self.__encode_uri(uri))

    def set_volume(self, volume: int, percent: bool = False) -> bool:
        """
        Set the volume of VLC and return back the boolean of the result if success or not

        :param volume: volume value (0-512 = 0-200%)
        :param percent: option for volume is actually percentage or not
        :return: bool
        """
        if percent:
            volume = int(volume * 2.56)
        return self._command("volume&val=" + str(volume))

    def set_random(self, random: bool) -> bool:
        """
        Set the shuffle state of VLC and return back the boolean of the result if success or not

        :param random: random state
        :return: bool
        """
        # Lower-cased for consistency with the async client; VLC's boolean
        # parameters are spelled "true"/"false".
        return self._command("pl_random&state=" + str(random).lower())

    @property
    def is_random(self) -> bool:
        """
        A property to get the random state of VLC

        :return: bool
        """
        return self._status_root()["random"] in ("true", "1")

    def set_repeat_media(self, repeat: bool) -> bool:
        """
        Set the repeat state of VLC and return back the boolean of the result if success or not

        :param repeat: repeat state
        :return: bool
        """
        return self._command("pl_repeat&state=" + str(repeat).lower())

    @property
    def is_repeat_media(self) -> bool:
        """
        A property to get the repeat state of VLC

        :return: bool
        """
        return self._status_root()["repeat"] in ("true", "1")

    def set_loop_queue(self, loop: bool) -> bool:
        """
        Set the loop state of VLC and return back the boolean of the result if success or not

        :param loop: loop state
        :return: bool
        """
        return self._command("pl_loop&state=" + str(loop).lower())

    @property
    def is_loop_queue(self) -> bool:
        """
        A property to get the loop state of VLC

        :return: bool
        """
        return self._status_root()["loop"] in ("true", "1")

    def fullscreen(self) -> tuple:
        """
        Toggle the fullscreen state of VLC; return whether the request
        succeeded together with the resulting fullscreen state.

        :return: (bool, bool)
        """
        return (self._command("fullscreen"), self.is_fullscreen)

    @property
    def is_fullscreen(self) -> bool:
        """
        Return the current state of VLC if VLC is in fullscreen returns true otherwise false

        :return: bool
        """
        return self._status_root()["fullscreen"] in ("true", "1")

    def browse(self, uri: str) -> dict:
        """
        Give the list of the files and return the dictionaries of XML

        :return: dict
        """
        uri = self.__encode_uri(uri)
        # BUGFIX: this request previously omitted the Basic-Auth credentials.
        return xmltodict.parse(
            requests.get(
                self.url + "/requests/browse.xml?uri=" + uri, auth=self.auth
            ).text
        )

    def previous(self) -> bool:
        """
        Revert to previous media and return if request was successful or not

        :return: bool
        """
        return self._command("pl_previous")

    def delete(self, uri: str) -> bool:
        """
        Delete media off the playlist. Returns bool indicating whether the
        request was successful.

        NOTE(review): VLC's `pl_delete` expects a playlist item *id*; passing a
        URI here may be ignored by VLC — confirm against the target version.
        """
        return self._command("pl_delete&id=" + self.__encode_uri(uri))

    def next(self) -> bool:
        """
        Skip to next media and return if request was successful or not

        :return: bool
        """
        return self._command("pl_next")

    def clear_history(self) -> bool:
        """
        Clear the histories. Returns boolean indicate request is successful or not

        :return: bool
        """
        return self._command("pl_history&val=clear")

    def pause(self) -> bool:
        """
        Pause the media playback. Returns bool indicate request was successful or not

        :return: bool
        """
        return self._command("pl_pause")

    @property
    def is_paused(self) -> bool:
        """
        Check if playback is not running. True for both "paused" and "stopped"
        (preserved behavior of the original implementation).

        :return: bool
        """
        return self._status_root()["state"] in ("paused", "stopped")

    def seek(self, time: typing.Union[str, datetime.timedelta, int]) -> bool:
        """
        Seek to a position in the media; `time` may be an int (seconds), a
        string accepted by VLC, or a datetime.timedelta. Returns bool
        indicating whether the request was successful.

        :return: bool
        """
        if isinstance(time, datetime.timedelta):
            time = time.total_seconds()
        return self._command("seek&val=" + str(time))

    @property
    def time(self) -> int:
        """
        Give the current time media is at (Unit seconds)

        :return: int
        """
        # xmltodict yields strings; cast to match the annotation (VLC reports
        # whole seconds here).
        return int(self._status_root()["time"])

    @property
    def duration(self) -> float:
        """
        Give how long media is. (Unit seconds)

        :return: float
        """
        return float(self._status_root()["length"])

    @property
    def position(self) -> float:
        """
        Get current bar position (0,1)

        :return: float
        """
        return float(self._status_root()["position"])

    @property
    def state(self) -> VLC_State:
        """
        Give current state of the playback.

        :return: VLC_State
        """
        return VLC_State(self._status_root()["state"])

    @property
    def volume(self) -> typing.Union[int, float]:
        """
        Get current playback's volume (0-512).
        If you want percentage returned then set the `volume_percentage`
        attribute to `True`.

        :return: int (raw) or float (percent)
        """
        # BUGFIX: xmltodict yields strings; the original divided the *string*
        # by 2.56 in percentage mode, raising TypeError. Convert first.
        raw = int(self._status_root()["volume"])
        if self.volume_percentage:
            return raw / 2.56
        return raw
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
if not aiohttp_exists:
    # aiohttp is missing: warn once at import time and install stand-in
    # classes whose every use raises ImportError, so the synchronous API
    # above keeps working on its own.
    warnings.warn(
        "aiohttp is not installed, so you can't use the async version of this library",
        RuntimeWarning,
    )
    class Async_VLC:
        # Stand-in for the async client: fails fast on construction or access.
        def __init__(self, *args) -> None:
            raise ImportError("No aiohttp exists")
        def __getattr__(cls, name) -> typing.NoReturn:
            raise ImportError("No aiohttp exists")
    class aiohttp_wrap:
        # Stand-in for the aiohttp adapter: fails fast on construction or access.
        def __init__(self, *args) -> None:
            raise ImportError("No aiohttp exists")
        def __getattr__(cls, name) -> typing.NoReturn:
            raise ImportError("No aiohttp exists")
else:
    class dummy:
        # Bare attribute container used to mimic a `requests.Response`-like
        # object (.status, .status_code, .text).
        pass
    class aiohttp_wrap:
        # requests-like facade over aiohttp: each verb opens a short-lived
        # ClientSession, performs the request, and returns a `dummy` carrying
        # `.status`, `.status_code` and the already-awaited `.text`, so the
        # async client can mirror the synchronous code paths.
        async def get(self, *args, **kwargs):
            async with aiohttp.ClientSession() as session:
                async with session.get(*args, **kwargs) as response:
                    d = dummy()
                    d.status = response.status
                    d.status_code = response.status
                    d.text = await response.text()
                    return d
        async def post(self, *args, **kwargs):
            async with aiohttp.ClientSession() as session:
                async with session.post(*args, **kwargs) as response:
                    d = dummy()
                    d.status = response.status
                    d.status_code = response.status
                    d.text = await response.text()
                    return d
        async def put(self, *args, **kwargs):
            async with aiohttp.ClientSession() as session:
                async with session.put(*args, **kwargs) as response:
                    d = dummy()
                    d.status = response.status
                    d.status_code = response.status
                    d.text = await response.text()
                    return d
        async def patch(self, *args, **kwargs):
            async with aiohttp.ClientSession() as session:
                async with session.patch(*args, **kwargs) as response:
                    d = dummy()
                    d.status = response.status
                    d.status_code = response.status
                    d.text = await response.text()
                    return d
        async def delete(self, *args, **kwargs):
            async with aiohttp.ClientSession() as session:
                async with session.delete(*args, **kwargs) as response:
                    d = dummy()
                    d.status = response.status
                    d.status_code = response.status
                    d.text = await response.text()
                    return d
    # Replace the class with a module-level singleton instance.
    aiohttp_wrap = aiohttp_wrap()
class Async_VLC:
def __init__(
self,
url: str = "http://localhost:8080",
auth: typing.Union[aiohttp.BasicAuth, tuple, set, list] = aiohttp.BasicAuth(
"", ""
),
) -> None:
"""
VLC Class
This class will initialize a VLC instance by connect to it using REST API w/ HTTP Basic Auth.
This class is blocking.
If you want to use asynchornous version please install
`aiohttp <https://pypi.org/project/aiohttp/>`_
:param url: VLC url
:param auth: VLC auth
:return: None
"""
self.url = url
if isinstance(auth, (tuple, set, list)):
if len(auth) != 2:
raise ValueError(
"Auth must be tuple or list of length 2 which is username and password"
)
self.auth = aiohttp.BasicAuth(*auth)
if not asyncio.run(self.connectable):
raise Exception("VLC is not running or REST API is not enabled")
self.full_screen = None
def __encode_uri(self, url: str) -> bool:
return urllib.parse.quote(url)
def __set_name__(self, owner, name):
self.name = "_" + name
def __getattr__(self, name):
if name not in self.__dict__:
warnings.warn(
"Attribute '{}' is not defined in VLC class or not yet implemented".format(
name
),
UserWarning,
)
return (None,)
return self.__dict__[name]
@property
async def status(self) -> dict:
"""
Show the status & configurations inform of a dictionaries
:return: dict
"""
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml", auth=self.auth
)
return xmltodict.parse(d.text)
@property
async def playlist(self) -> dict:
"""
Show the playlist and configurations inform of a dictionaries
:return: dict
"""
d = await aiohttp_wrap.get(
self.url + "/requests/playlist.xml", auth=self.auth
)
return xmltodict.parse(d.text)
@property
async def connectable(self) -> bool:
"""
Check if VLC REST API is running
:return: bool
"""
try:
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml", auth=self.auth
)
return d.status_code == 200
except aiohttp.client_exceptions.ClientConnectorError:
return False
async def stop(self) -> bool:
"""
Stop the current playing media and return back the boolean of the result
:return: bool
"""
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml?command=pl_stop", auth=self.auth
)
return d.status_code == 200
async def clear_playlist(self) -> bool:
"""
Clear the playlist and return back the boolean of the result
:return: bool
"""
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml?command=pl_empty", auth=self.auth
)
return d.status_code == 200
async def play(self, uri: str) -> bool:
"""
Play a media by uri and return back the boolean of the result if success or not
:param uri: media uri
:return: bool
"""
uri = self.__encode_uri(uri)
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml?command=in_play&input=" + uri,
auth=self.auth,
)
return d.status_code == 200
async def append_queue(self, uri: str) -> bool:
"""
Append a media to the queue and return back the boolean of the result if success or not
:param uri: media uri
:return: bool
"""
uri = self.__encode_uri(uri)
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml?command=in_enqueue&input=" + uri,
auth=self.auth,
)
return d.status_code == 200
async def set_volume(self, volume: int, percent: bool = False) -> bool:
"""
Set the volume of VLC and return back the boolean of the result if success or not
:param volume: volume value (0-512 = 0-200%)
:param percent: option for volume is actually percentage or not
:return: bool
"""
if percent:
volume = int(volume * 2.56)
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml?command=volume&val=" + str(volume),
auth=self.auth,
)
return d.status_code == 200
async def set_random(self, random: bool) -> bool:
"""
Set the shuffle state of VLC and return back the boolean of the result if success or not
:param random: random state
:return: bool
"""
d = await aiohttp_wrap.get(
self.url
+ "/requests/status.xml?command=pl_random&state="
+ str(random).lower(),
auth=self.auth,
)
return d.status_code == 200
@property
async def is_random(self) -> bool:
"""
A property to get the random state of VLC
:return: bool
"""
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml", auth=self.auth
)
content = xmltodict.parse(d.text)
return True if content["root"]["random"] in ("true", "1") else False
async def set_repeat_media(self, repeat: bool) -> bool:
    """Set VLC's repeat-current-media state.

    :param repeat: desired repeat state
    :return: True when the request returned HTTP 200
    """
    state = str(repeat).lower()
    endpoint = self.url + "/requests/status.xml?command=pl_repeat&state=" + state
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
@property
async def is_repeat_media(self) -> bool:
    """Current repeat-current-media state reported by VLC.

    :return: True when repeat is enabled
    """
    response = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    status = xmltodict.parse(response.text)
    return status["root"]["repeat"] in ("true", "1")
async def set_loop_queue(self, loop: bool) -> bool:
    """Set VLC's loop-the-playlist state.

    :param loop: desired loop state
    :return: True when the request returned HTTP 200
    """
    state = str(loop).lower()
    endpoint = self.url + "/requests/status.xml?command=pl_loop&state=" + state
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
@property
async def is_loop_queue(self) -> bool:
    """Current loop-the-playlist state reported by VLC.

    :return: True when looping is enabled
    """
    response = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    status = xmltodict.parse(response.text)
    return status["root"]["loop"] in ("true", "1")
async def fullscreen(self) -> typing.Tuple[bool, bool]:
    """
    Send VLC's ``fullscreen`` command and report the outcome.

    NOTE(review): ``command=fullscreen`` presumably toggles fullscreen rather
    than setting a specific state — confirm against the VLC HTTP interface.

    :return: tuple of (request succeeded with HTTP 200, fullscreen state
        after the command)
    """
    d = await aiohttp_wrap.get(
        self.url + "/requests/status.xml?command=fullscreen", auth=self.auth
    )
    # Second element re-queries VLC so callers see the post-command state.
    return (
        d.status_code == 200,
        await self.is_fullscreen,
    )
@property
async def is_fullscreen(self) -> bool:
    """Whether VLC is currently in fullscreen.

    Also caches the value on ``self.full_screen`` as a side effect.

    :return: True when VLC reports fullscreen
    """
    response = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    status = xmltodict.parse(response.text)
    # Compute once, cache, and return the same value.
    self.full_screen = status["root"]["fullscreen"] in ("true", "1")
    return self.full_screen
""" async def set_subtitle_file(self, uri: str) -> bool:
\"""
Set the subtitle file to show in the VLC and returns bool based on successful or not
:return: bool
\"""
\""" uri = self.__encode_uri(uri) \"""
d = await aiohttp_wrap.get(
self.url + "/requests/status.xml?command=pl_enqueue&input=" + uri,
auth=self.auth,
)
return d.status_code == 200 """
async def browse(self, uri: str) -> dict:
    """List the files under *uri* on the VLC host.

    :param uri: directory uri to browse
    :return: parsed XML response as a dict

    Bug fix: this was the only request in the client that omitted
    ``auth=self.auth``; VLC's HTTP interface requires the password for every
    request, so an authenticated instance would get 401 here.
    """
    uri = self.__encode_uri(uri)
    d = await aiohttp_wrap.get(
        self.url + "/requests/browse.xml?uri=" + uri,
        auth=self.auth,  # was missing; every other endpoint sends credentials
    )
    return xmltodict.parse(d.text)
async def previous(self) -> bool:
    """Jump back to the previous media in the playlist.

    :return: True when the request returned HTTP 200
    """
    endpoint = self.url + "/requests/status.xml?command=pl_previous"
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
async def delete(self, uri: str) -> bool:
    """Remove an entry from the playlist.

    NOTE(review): ``pl_delete`` takes a playlist item *id* parameter; this
    method passes the (URL-encoded) *uri* argument through as that id —
    confirm callers actually supply playlist ids here.

    :return: True when the request returned HTTP 200
    """
    target = self.__encode_uri(uri)
    endpoint = self.url + "/requests/status.xml?command=pl_delete&id=" + target
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
async def next(self) -> bool:
    """Skip to the next media in the playlist.

    :return: True when the request returned HTTP 200
    """
    endpoint = self.url + "/requests/status.xml?command=pl_next"
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
async def clear_history(self) -> bool:
    """Clear VLC's playback history.

    :return: True when the request returned HTTP 200
    """
    endpoint = self.url + "/requests/status.xml?command=pl_history&val=clear"
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
async def pause(self) -> bool:
    """Toggle pause on the current playback.

    :return: True when the request returned HTTP 200
    """
    endpoint = self.url + "/requests/status.xml?command=pl_pause"
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
@property
async def is_paused(self) -> bool:
    """Whether playback is currently halted.

    Note: this reports True for both the ``paused`` and ``stopped`` states.

    :return: True when VLC reports paused or stopped
    """
    response = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    status = xmltodict.parse(response.text)
    return status["root"]["state"] in ("paused", "stopped")
async def seek(self, time: typing.Union[str, datetime.timedelta, int]) -> bool:
    """Seek within the current media.

    :param time: target position — an int/str number of seconds or a
        :class:`datetime.timedelta`
    :return: True when the request returned HTTP 200
    """
    # Normalize timedeltas to a plain number of seconds.
    if isinstance(time, datetime.timedelta):
        time = time.total_seconds()
    endpoint = self.url + "/requests/status.xml?command=seek&val=" + str(time)
    response = await aiohttp_wrap.get(endpoint, auth=self.auth)
    return response.status_code == 200
@property
async def time(self) -> int:
    """
    Give the current time media is at (Unit seconds)

    NOTE(review): xmltodict returns element text as ``str`` and this value is
    returned unconverted, so callers likely receive a string despite the
    ``int`` annotation — confirm and convert at the call site if needed.
    :return: int, str
    """
    d = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    content = xmltodict.parse(d.text)
    return content["root"]["time"]
@property
async def duration(self) -> float:
    """
    Give how long media is. (Unit seconds)

    NOTE(review): xmltodict returns element text as ``str`` and this value is
    returned unconverted, so callers likely receive a string despite the
    ``float`` annotation — confirm and convert at the call site if needed.
    :return: int, str
    """
    d = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    content = xmltodict.parse(d.text)
    return content["root"]["length"]
@property
async def position(self) -> float:
    """
    Get current bar position (0,1)

    NOTE(review): xmltodict returns element text as ``str`` and this value is
    returned unconverted, so callers likely receive a string despite the
    ``float`` annotation — confirm and convert at the call site if needed.
    :return: float, str
    """
    d = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    content = xmltodict.parse(d.text)
    return content["root"]["position"]
@property
async def state(self) -> VLC_State:
    """Current playback state wrapped in a :class:`VLC_State`.

    :return: VLC_State for the reported state string
    """
    response = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    status = xmltodict.parse(response.text)
    return VLC_State(status["root"]["state"])
@property
async def volume(self) -> typing.Union[int, float]:
    """Current playback volume on VLC's 0-512 scale.

    When ``self.volume_percentage`` is set, the value is converted to a
    percentage (0-200) instead.

    :return: int (raw scale) or float (percentage)

    Bug fix: xmltodict yields the volume as a *string*; the original divided
    that string by 2.56 (``str / float`` → TypeError). Convert to int first.
    """
    d = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    content = xmltodict.parse(d.text)
    raw = int(content["root"]["volume"])
    if self.volume_percentage:
        # 256 on VLC's scale == 100%, hence the 2.56 factor.
        return float(raw / 2.56)
    return raw
@property
async def is_playing(self):
    """
    Check if VLC is playing or not (True for both playing and paused).

    :return: bool

    Bug fix: the original called ``asyncio.run()`` from inside this
    coroutine — which raises RuntimeError whenever an event loop is already
    running (the normal case for awaited code) — and then fed the response
    *object*, not its text body, to ``xmltodict.parse``. Await the request
    and parse ``d.text`` like every other property in this class.
    """
    d = await aiohttp_wrap.get(
        self.url + "/requests/status.xml", auth=self.auth
    )
    content = xmltodict.parse(d.text)
    return content["root"]["state"] in ("playing", "paused")
import requests
import json
import hmac
import hashlib
base_url = "https://dma-api.defi.wiki/orders"
def hashing(secret, query_string):
    """Return the hex HMAC-SHA256 digest of *query_string* keyed by *secret*.

    :param secret: signing key (str, UTF-8 encoded before hashing)
    :param query_string: message to sign (str, UTF-8 encoded before hashing)
    :return: lowercase hex digest string
    """
    mac = hmac.new(
        secret.encode("utf-8"),
        query_string.encode("utf-8"),
        hashlib.sha256,
    )
    return mac.hexdigest()
class ZerocapRestClient:
    """Thin REST client for the Zerocap DMA order API.

    On construction the (api_key, secret) pair is validated against the
    ``api_key_signature_valid`` endpoint; an Exception is raised when the
    check fails.

    Refactor notes (behavior preserved):
      * the identical POST/parse logic of create_order/fetch_order/
        fetch_orders moved into ``_post``;
      * the ``if signature == "fail"`` guards were removed — ``hashing()``
        always returns a hex digest and can never return "fail", so those
        branches were unreachable.
    """

    # JSON content type shared by every request.
    _HEADERS = {'Content-Type': 'application/json'}

    def __init__(self, api_key, secret):
        """Store credentials and validate them against the API.

        :raises Exception: when the server rejects the key/signature pair.
        """
        self.api_key = api_key
        self.secret = secret
        signature = self.encryption_api_key()
        url = f"{base_url}/api_key_signature_valid"
        data = {
            "api_key": self.api_key,
            "signature": signature,
        }
        response = requests.post(url, data=json.dumps(data),
                                 headers=self._HEADERS)
        check_pass = False
        if response.status_code == 200:
            result = response.json()
            # The service wraps its own status code inside the JSON body.
            if result["status_code"] == 200:
                check_pass = True
        if not check_pass:
            raise Exception("ZerocapRestClient init fail")

    def encryption_api_key(self):
        """Sign the api_key with the account secret (HMAC-SHA256 hex digest)."""
        return hashing(self.secret, self.api_key)

    def _account_vault(self, note, third_identity_id, signature):
        """Build the ``account_vault`` section common to every order request."""
        return {
            "third_identity_id": third_identity_id,
            "api_key": self.api_key,
            "signature": signature,
            "note": note,
        }

    def _post(self, path, data, error_message):
        """POST *data* (JSON-encoded) to ``{base_url}/{path}``.

        Returns the response's ``data`` field on HTTP 200, *error_message* on
        any transport/decoding error, and ``None`` on a non-200 status
        (matching the original per-method behavior).
        """
        try:
            response = requests.post(f"{base_url}/{path}",
                                     data=json.dumps(data),
                                     headers=self._HEADERS)
            if response.status_code == 200:
                res = response.json()
                return res["data"]
        except Exception:
            return error_message

    def create_order(self, symbol, side, type, amount, price, client_order_id, note, third_identity_id):
        """Place a new order; returns the server's ``data`` payload."""
        signature = self.encryption_api_key()
        data = {
            "symbol": symbol,
            "side": side,
            "type": type,
            "amount": amount,
            "price": price,
            "client_order_id": client_order_id,
            "account_vault": self._account_vault(note, third_identity_id,
                                                 signature),
        }
        return self._post("create_order", data,
                          "Dma Server error, create order fail")

    def fetch_order(self, id, note, third_identity_id):
        """Fetch a single order by *id*; returns the server's ``data`` payload."""
        signature = self.encryption_api_key()
        data = {
            "id": id,
            "account_vault": self._account_vault(note, third_identity_id,
                                                 signature),
        }
        return self._post("fetch_order", data,
                          "Dma Server error, fetch order fail")

    def fetch_orders(self, symbol: str, since: int, limit: int, note: str, third_identity_id: str):
        """Fetch up to *limit* orders for *symbol* since timestamp *since*."""
        signature = self.encryption_api_key()
        data = {
            "symbol": symbol,
            "since": since,
            "limit": limit,
            "account_vault": self._account_vault(note, third_identity_id,
                                                 signature),
        }
        return self._post("fetch_orders", data,
                          "Dma Server error, fetch orders fail")
if __name__ == "__main__":
api_key = "coinroutes"
secret = "e2d2a9b8-85fe-4a38-b9bd-60e06b58b28a"
client = ZerocapRestClient(api_key, secret) | /rest_zerocap_client-0.0.5.tar.gz/rest_zerocap_client-0.0.5/rest_client/zerpcap_rest_client.py | 0.459076 | 0.172381 | zerpcap_rest_client.py | pypi |
from pyats.connections import BaseConnection
class Implementation(BaseConnection):
    '''Abstract base class for REST connection implementations.

    Concrete per-OS implementations subclass this and provide the HTTP-verb
    methods; everything here either tracks connection state or raises
    NotImplementedError.

    YAML Example
    ------------

        devices:
            PE1:
                credentials:
                    rest:
                        username: admin
                        password: cisco123
                connections:
                    rest:
                        class: rest.connector.Rest
                        ip : "2.3.4.5"
                        port: "443"
                        protocol: https

    Example
    -------

        >>> from pyats.topology import loader
        >>> testbed = loader.load('/users/xxx/xxx/asr22.yaml')
        >>> device = testbed.devices['PE1']
        >>> device.connect(alias='rest', via='rest')
        >>> device.rest.connected
        True
    '''

    def __init__(self, *args, **kwargs):
        '''Instantiate a single connection instance.'''
        BaseConnection.__init__(self, *args, **kwargs)
        # No session exists yet; a subclass's connect() flips this flag.
        self._is_connected = False

    @property
    def connected(self):
        '''True once connect() has completed successfully.'''
        return self._is_connected

    def connect(self, *args, **kwargs):
        '''Establish the REST session to the device.'''
        raise NotImplementedError

    def disconnect(self):
        '''Tear down the session for this particular alias.'''
        raise NotImplementedError

    def get(self, *args, **kwargs):
        '''GET REST Command to retrieve information from the device.'''
        raise NotImplementedError

    def post(self, *args, **kwargs):
        '''POST REST Command to configure information from the device.'''
        raise NotImplementedError

    def put(self, *args, **kwargs):
        '''PUT REST Command to update information on the device.'''
        raise NotImplementedError

    def patch(self, *args, **kwargs):
        '''PATCH REST Command to update information on the device.'''
        raise NotImplementedError

    def delete(self, *args, **kwargs):
        '''DELETE REST Command to delete information from the device.'''
        raise NotImplementedError

    def configure(self, *args, **kwargs):
        '''Not supported for REST; kept so callers get a helpful error.'''
        raise NotImplementedError('configure is not a supported method for REST. '
                                  'post is probably what you are looking for')

    def execute(self, *args, **kwargs):
        '''Not supported for REST; kept so callers get a helpful error.'''
        raise NotImplementedError('execute is not a supported method for REST. '
                                  'get is probably what you are looking for.')
import json
import logging
import requests
from requests.exceptions import RequestException
from pyats.connections import BaseConnection
from rest.connector.implementation import Implementation as Imp
from rest.connector.utils import get_username_password
# create a logger for this module
log = logging.getLogger(__name__)
class Implementation(Imp):
    '''Rest Implementation for APIC

    Implementation of Rest connection to devices based on pyATS BaseConnection
    for APIC

    YAML Example
    ------------

        devices:
            apic1:
                connections:
                    rest:
                        class: rest.connector.Rest
                        ip : "2.3.4.5"
                        credentials:
                            rest:
                                username: admin
                                password: cisco123

    Code Example
    ------------

        >>> from pyats.topology import loader
        >>> testbed = loader.load('/users/xxx/xxx/testbed.yaml')
        >>> device = testbed.devices['apic1']
        >>> device.connect(alias='rest', via='rest')
        >>> device.rest.connected
        True
    '''

    def __init__(self, *args, **kwargs):
        # This whole module is deprecated; warn loudly at construction time.
        import warnings
        warnings.warn(
            "This rest.connector library is deprecated and will be removed "
            "on v20.3. Please set your testbed to 'os: apic' in order to "
            "use the new library.")
        super().__init__(*args, **kwargs)

    @BaseConnection.locked
    def connect(self, timeout=30):
        '''connect to the device via REST

        Arguments
        ---------

            timeout (int): Timeout value

        Raises
        ------

        Exception
        ---------

            If the connection did not go well

        Note
        ----

        There is no return from this method. If something goes wrong, an
        exception will be raised.
        '''
        if self.connected:
            return

        if 'host' in self.connection_info:
            ip = self.connection_info['host']
        else:
            ip = self.connection_info['ip'].exploded
        if 'port' in self.connection_info:
            port = self.connection_info['port']
            self.url = 'https://{ip}:{port}/'.format(ip=ip, port=port)
        else:
            self.url = 'https://{ip}/'.format(ip=ip)
        login_url = '{f}api/aaaLogin.json'.format(f=self.url)

        username, password = get_username_password(self)

        payload = {
           "aaaUser": {
              "attributes": {
                 "name": username,
                 "pwd": password,
               }
           }
        }

        headers = {
            'Content-Type': 'text/plain'
        }

        log.info("Connecting to '{d}' with alias "
                 "'{a}'".format(d=self.device.name, a=self.alias))

        self.session = requests.Session()
        _data = json.dumps(payload)

        # Connect to the device via requests.
        # NOTE(review): verify=False disables TLS certificate checking —
        # presumably intentional for lab gear with self-signed certs; confirm.
        response = self.session.post(login_url, data=_data, timeout=timeout, \
                                     verify=False, headers=headers)
        log.info(response)

        # Make sure it returned requests.codes.ok
        if response.status_code != requests.codes.ok:
            # Something bad happened
            raise RequestException("Connection to '{ip}' has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{ok}'"\
                                        .format(ip=ip, c=response.status_code,
                                                ok=requests.codes.ok))
        self._is_connected = True
        log.info("Connected successfully to '{d}'".format(d=self.device.name))

    @BaseConnection.locked
    def disconnect(self):
        '''disconnect the device for this particular alias'''

        log.info("Disconnecting from '{d}' with "
                 "alias '{a}'".format(d=self.device.name, a=self.alias))
        try:
            self.session.close()
        finally:
            self._is_connected = False
        log.info("Disconnected successfully from "
                 "'{d}'".format(d=self.device.name))

    def isconnected(func):
        '''Decorator to make sure the session to the device is active

        There is a limit on how long a session can stay active on the APIC,
        and there is no way to verify liveness without sending a command, so
        on any failure the session is re-established and the call retried
        once.

        Bug fix: the retry used a bare ``except:`` which also swallowed
        KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
        '''
        def decorated(self, *args, **kwargs):
            try:
                ret = func(self, *args, **kwargs)
            except Exception:
                # Session likely expired — reconnect and retry once.
                self.disconnect()
                if 'timeout' in kwargs:
                    self.connect(timeout=kwargs['timeout'])
                else:
                    self.connect()
                ret = func(self, *args, **kwargs)
            return ret
        return decorated

    @BaseConnection.locked
    @isconnected
    def get(self, dn, query_target='self', rsp_subtree='no', \
            query_target_filter='', rsp_prop_include='all', \
            rsp_subtree_include='', rsp_subtree_class='',\
            expected_status_code=requests.codes.ok, timeout=30):
        '''GET REST Command to retrieve information from the device

        Arguments
        ---------

            dn (string): Unique distinguished name that describes the object
                         and its place in the tree.
            query_target {self|children|subtree}:
                                'self': (default) MO itself
                                'children': just the MO's child objects
                                'subtree': MO and its child objects
            rsp_subtree {no|children|full}: Specifies child object level
                                            included in the response
                                            'no': (default) the response
                                                  does not include any children
                                            'children': return only the child
                                                        objects
                                            'full': includes the full tree
                                                    structure
            rsp_prop_include {all|naming-only|config-only}:
                                'all': all properties of the objects
                                'naming-only': only the naming properties
                                'config-only': only configurable properties
            rsp_subtree_include (string): specify additional contained objects
                                          or options to be included
            rsp_subtree_class (string) : specify classes
            query_target_filter (string): filter expression
            expected_status_code (int): Expected result
            timeout (int): Maximum time
        '''
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))

        # Build the query string from the always-present options first...
        full_url = "{f}{dn}?query-target={qt}&rsp-subtree={rs}"\
                   "&rsp-prop-include={rpi}"\
                       .format(f=self.url,
                               dn=dn,
                               qt=query_target,
                               rs=rsp_subtree,
                               rpi=rsp_prop_include)

        # ...then append the optional filters only when provided.
        if query_target_filter:
            full_url += "&query-target-filter={qtf}"\
                .format(qtf=query_target_filter)

        if rsp_subtree_include:
            full_url += "&rsp-subtree-include={rsi}"\
                .format(rsi=rsp_subtree_include)

        if rsp_subtree_class:
            full_url += "&rsp-subtree-class={rsc}"\
                .format(rsc=rsp_subtree_class)

        log.info("Sending GET command to '{d}':"\
                 "\nDN: {furl}".format(d=self.device.name, furl=full_url))

        response = self.session.get(full_url, timeout=timeout, verify=False)
        try:
            output = response.json()
        except Exception:
            # Not every endpoint answers JSON; fall back to raw text.
            output = response.text
        log.info("Output received:\n{output}".format(output=
            json.dumps(output, indent=2, sort_keys=True)))

        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("GET {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output

    @BaseConnection.locked
    @isconnected
    def post(self, dn, payload, expected_status_code=requests.codes.ok,
             timeout=30):
        '''POST REST Command to configure information from the device

        Arguments
        ---------

            dn (string): Unique distinguished name that describes the object
                         and its place in the tree.
            payload (dict): Dictionary containing the information to send via
                            the post
            expected_status_code (int): Expected result
            timeout (int): Maximum time
        '''
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Deal with the dn
        full_url = '{f}{dn}'.format(f=self.url, dn=dn)

        log.info("Sending POST command to '{d}':"\
                 "\nDN: {furl}\nPayload:{p}".format(d=self.device.name,
                                                    furl=full_url,
                                                    p=payload))

        # Send to the device.
        # NOTE(review): the payload is passed positionally as ``data`` — a
        # dict would be form-encoded, not JSON-encoded; callers appear to
        # pass pre-serialized strings. Confirm before changing.
        response = self.session.post(full_url, payload, timeout=timeout, \
                                     verify=False)
        output = response.json()
        log.info("Output received:\n{output}".format(output=output))

        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("POST {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output

    @BaseConnection.locked
    @isconnected
    def delete(self, dn, expected_status_code=requests.codes.ok, timeout=30):
        '''DELETE REST Command to delete information from the device

        Arguments
        ---------

            dn (string): Unique distinguished name that describes the object
                         and its place in the tree.
            expected_status_code (int): Expected result
            timeout (int): Maximum time
        '''
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Deal with the dn
        full_url = '{f}{dn}'.format(f=self.url, dn=dn)

        log.info("Sending DELETE command to '{d}':"\
                 "\nDN: {furl}".format(d=self.device.name, furl=full_url))

        # Send to the device
        response = self.session.delete(full_url, timeout=timeout, verify=False)
        output = response.json()
        log.info("Output received:\n{output}".format(output=output))

        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("DELETE {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output
import json
import logging
import requests
from requests.exceptions import RequestException
from pyats.connections import BaseConnection
from rest.connector.implementation import Implementation as Imp
from rest.connector.utils import get_token
# create a logger for this module
log = logging.getLogger(__name__)
class Implementation(Imp):
    '''Rest Implementation for Webex

    Implementation of Rest connection to devices based on pyATS BaseConnection
    for Webex

    YAML Example
    ------------

        devices:
            webex:
                os: webex
                connections:
                    rest:
                        class: rest.connector.Rest
                        ip : "10.1.1.1"
                        credentials:
                            rest:
                                token: <webexaccesstoken>

    Code Example
    ------------

        >>> from pyats.topology import loader
        >>> testbed = loader.load('testbed.yaml')
        >>> device = testbed.devices['webex']
        >>> device.connect(alias='rest', via='rest')
        >>> device.rest.connected
        True
    '''

    @BaseConnection.locked
    def connect(self, timeout=30):
        '''connect to the device via REST

        Arguments
        ---------

            timeout (int): Timeout value

        Raises
        ------

        Exception
        ---------

            If the connection did not go well

        Note
        ----

        There is no return from this method. If something goes wrong, an
        exception will be raised.
        '''
        if self.connected:
            return

        if 'host' in self.connection_info:
            ip = self.connection_info['host']
        else:
            ip = self.connection_info['ip'].exploded
        if 'port' in self.connection_info:
            port = self.connection_info['port']
            self.url = 'https://{ip}:{port}/'.format(ip=ip, port=port)
        else:
            self.url = 'https://{ip}/'.format(ip=ip)

        # Validate the token by fetching the caller's own identity.
        login_url = '{f}v1/people/me'.format(f=self.url)

        self.token = get_token(self)

        self.headers = {
            'Authorization': 'Bearer {}'.format(self.token),
        }

        log.info("Connecting to '{d}' with alias "
                 "'{a}'".format(d=self.device.name, a=self.alias))

        self.session = requests.Session()

        # Connect to the device via requests
        response = self.session.get(login_url, timeout=timeout, \
                                    headers=self.headers)
        log.info(response)

        # Make sure it returned requests.codes.ok
        if response.status_code != requests.codes.ok:
            # Something bad happened
            raise RequestException("Connection to '{ip}' has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{ok}'"\
                                        .format(ip=ip, c=response.status_code,
                                                ok=requests.codes.ok))
        self._is_connected = True
        log.info("Connected successfully to '{d}'".format(d=self.device.name))

    @BaseConnection.locked
    def disconnect(self):
        '''disconnect the device for this particular alias'''

        log.info("Disconnecting from '{d}' with "
                 "alias '{a}'".format(d=self.device.name, a=self.alias))
        try:
            self.session.close()
        finally:
            self._is_connected = False
        log.info("Disconnected successfully from "
                 "'{d}'".format(d=self.device.name))

    def isconnected(func):
        '''Decorator to make sure the session to the device is active

        Sessions have a limited lifetime and there is no cheap way to probe
        liveness, so the session is re-established before every call.

        Bug fix: the reconnect used to happen inside a ``try`` whose
        ``finally`` invoked *func* unconditionally — so when reconnecting
        failed, the request was still sent and its result discarded in
        favour of the reconnect exception. Reconnect errors now propagate
        before the request is attempted.
        '''
        def decorated(self, *args, **kwargs):
            self.disconnect()
            if 'timeout' in kwargs:
                self.connect(timeout=kwargs['timeout'])
            else:
                self.connect()
            return func(self, *args, **kwargs)
        return decorated

    @BaseConnection.locked
    def _request(self, method, dn, **kwargs):
        """ Wrapper to send REST command to device

        Args:
            method (str): session request method

            dn (str): rest endpoint

        Returns:
            response.json() or response.text

        Raises:
            RequestException if response is not ok
        """

        if not self.connected:
            raise Exception("'{d}' is not connected for alias '{a}'".format(
                d=self.device.name, a=self.alias))

        # Deal with the dn
        full_url = '{f}{dn}'.format(f=self.url, dn=dn)

        # Only used for logging — show whichever body form was provided.
        if 'data' in kwargs:
            p = kwargs['data']
        elif 'json' in kwargs:
            p = kwargs['json']
        else:
            p = ''

        expected_return_code = kwargs.pop('expected_return_code', None)

        log.info("Sending {method} command to '{d}':"
                 "\nDN: {furl}\nPayload:{p}".format(method=method,
                                                    d=self.device.name,
                                                    furl=full_url,
                                                    p=p))

        # Send to the device
        response = self.session.request(method=method, url=full_url, **kwargs)

        # An expected return code was provided. Ensure the response has this code.
        if expected_return_code:
            if response.status_code != expected_return_code:
                raise RequestException(
                    "'{c}' result code has been returned for '{d}'.\n"
                    "Expected '{expected_c}' result code.\n"
                    "Response from server: {r}".format(
                        c=response.status_code,
                        d=self.device.name,
                        expected_c=expected_return_code,
                        r=response.text))
        else:
            # No expected return code provided. Make sure it was successful.
            try:
                response.raise_for_status()
            except Exception:
                raise RequestException("'{c}' result code has been returned "
                                       "for '{d}'.\nResponse from server: "
                                       "{r}".format(d=self.device.name,
                                                    c=response.status_code,
                                                    r=response.text))

        log.info("Response from '{dev}':\n"
                 "Result Code: {c}\n"
                 "Response: {r}".format(dev=self.device.name,
                                        c=response.status_code,
                                        r=response.text))

        # In case the response cannot be decoded into json
        # warn and return the raw text
        if response.text:
            try:
                output = response.json()
            except Exception:
                log.warning('Could not decode json. Returning text!')
                output = response.text
        else:
            output = response.text

        return output

    @BaseConnection.locked
    @isconnected
    def get(self, dn, headers=None, timeout=30, **kwargs):
        """ GET REST Command to retrieve information from the device

        Args:
            dn (str): Unique distinguished name that describes the object
                and its place in the tree.

            headers (dict): Headers to send with the rest call

            timeout (int): Maximum time to allow rest call to return

        Returns:
            response.json() or response.text

        Raises:
            RequestException if response is not ok
        """
        if not headers:
            headers = self.headers
        if headers and 'Authorization' not in headers:
            headers.update({'Authorization': 'Bearer {}'.format(self.token)})

        return self._request('GET',
                             dn,
                             headers=headers,
                             timeout=timeout,
                             **kwargs)

    @BaseConnection.locked
    @isconnected
    def post(self, dn, payload, headers=None, timeout=30, **kwargs):
        """POST REST Command to configure new information on the device

        Args:
            dn (string): Unique distinguished name that describes the object
                and its place in the tree.

            payload (dict): Dictionary containing the information to send via
                the post

            headers (dict): Headers to send with the rest call

            timeout (int): Maximum time

        Returns:
            response.json() or response.text

        Raises:
            RequestException if response is not ok
        """
        if not headers:
            headers = self.headers
        if headers and 'Authorization' not in headers:
            headers.update({'Authorization': 'Bearer {}'.format(self.token)})

        # Accept a pre-serialized JSON string for convenience.
        if isinstance(payload, str):
            payload = json.loads(payload)

        return self._request('POST',
                             dn,
                             data=payload,
                             headers=headers,
                             timeout=timeout,
                             **kwargs)

    @BaseConnection.locked
    @isconnected
    def delete(self, dn, headers=None, timeout=30, **kwargs):
        """DELETE REST Command to delete information from the device

        Args:
            dn (string): Unique distinguished name that describes the object
                and its place in the tree.

            headers (dict): Headers to send with the rest call

            timeout (int): Maximum time

        Returns:
            response.json() or response.text

        Raises:
            RequestException if response is not ok
        """
        if not headers:
            headers = self.headers
        if headers and 'Authorization' not in headers:
            headers.update({'Authorization': 'Bearer {}'.format(self.token)})

        return self._request('DELETE',
                             dn,
                             headers=headers,
                             timeout=timeout,
                             **kwargs)

    @BaseConnection.locked
    @isconnected
    def put(self, dn, payload, headers=None, timeout=30, **kwargs):
        """PUT REST Command to update existing information on the device

        Args:
            dn (string): Unique distinguished name that describes the object
                and its place in the tree.

            payload (dict): Dictionary containing the information to send via
                the post

            headers (dict): Headers to send with the rest call

            timeout (int): Maximum time

        Returns:
            response.json() or response.text

        Raises:
            RequestException if response is not ok
        """
        if not headers:
            headers = self.headers
        if headers and 'Authorization' not in headers:
            headers.update({'Authorization': 'Bearer {}'.format(self.token)})

        # Consistency fix: use isinstance() like post(), not type() == str.
        if isinstance(payload, str):
            payload = json.loads(payload)

        return self._request('PUT',
                             dn,
                             data=payload,
                             headers=headers,
                             timeout=timeout,
                             **kwargs)
import json
import logging
import requests
import time
import urllib.parse
from requests.exceptions import RequestException
from pyats.connections import BaseConnection
from rest.connector.implementation import Implementation as Imp
from rest.connector.utils import get_username_password
# create a logger for this module
log = logging.getLogger(__name__)
class Implementation(Imp):
"""Rest Implementation for ND
Implementation of Rest connection to devices based on pyATS BaseConnection
for ND
YAML Example
------------
devices:
nd1:
os: nd
connections:
rest:
class: rest.connector.Rest
ip : "2.3.4.5"
credentials:
rest:
username: admin
password: cisco123
Code Example
------------
>>> from pyats.topology import loader
>>> testbed = loader.load('/users/xxx/xxx/testbed.yaml')
>>> device = testbed.devices['nd1']
>>> device.connect(alias='rest', via='rest')
>>> device.rest.connected
True
"""
@BaseConnection.locked
def connect(self, timeout=30, retries=3, retry_wait=10):
"""connect to the device via REST
Arguments
---------
timeout (int): Timeout value
retries (int): Max retries on request exception (default: 3)
retry_wait (int): Seconds to wait before retry (default: 10)
Raises
------
Exception
---------
If the connection did not go well
Note
----
There is no return from this method. If something goes wrong, an
exception will be raised.
YAML Example
------------
devices:
nd1:
os: nd
connections:
rest:
class: rest.connector.Rest
ip : "2.3.4.5"
credentials:
rest:
username: admin
password: cisco123
Code Example
------------
>>> from pyats.topology import loader
>>> testbed = loader.load('/users/xxx/xxx/testbed.yaml')
>>> device = testbed.devices['nd1']
>>> device.connect(alias='rest', via='rest')
"""
if self.connected:
return
if 'host' in self.connection_info:
ip = self.connection_info['host']
else:
ip = self.connection_info['ip'].exploded
if 'port' in self.connection_info:
port = self.connection_info['port']
self.url = 'https://{ip}:{port}/'.format(ip=ip, port=port)
else:
self.url = 'https://{ip}/'.format(ip=ip)
login_url = '{f}login'.format(f=self.url)
username, password = get_username_password(self)
payload = {
"userName": username,
"userPasswd": password,
"domain": "DefaultAuth"
}
headers = {
'Content-Type': 'application/json'
}
log.info("Connecting to '{d}' with alias "
"'{a}'".format(d=self.device.name, a=self.alias))
self.session = requests.Session()
_data = json.dumps(payload)
for _ in range(retries):
try:
# Connect to the device via requests
response = self.session.post(login_url, data=_data, timeout=timeout,
verify=False, headers=headers)
log.info(response)
# Make sure it returned requests.codes.ok
if response.status_code != requests.codes.ok:
# Something bad happened
raise RequestException("Connection to '{ip}' has returned the "
"following code '{c}', instead of the "
"expected status code '{ok}'"
.format(ip=ip, c=response.status_code,
ok=requests.codes.ok))
break
except Exception:
log.warning('Request to {} failed. Waiting {} seconds before retrying\n'.format(
self.device.name, retry_wait), exc_info=True)
time.sleep(retry_wait)
else:
raise ConnectionError('Connection to {} failed'.format(self.device.name))
self._is_connected = True
log.info("Connected successfully to '{d}'".format(d=self.device.name))
@BaseConnection.locked
def disconnect(self):
"""disconnect the device for this particular alias"""
log.info("Disconnecting from '{d}' with "
"alias '{a}'".format(d=self.device.name, a=self.alias))
try:
self.session.close()
finally:
self._is_connected = False
log.info("Disconnected successfully from "
"'{d}'".format(d=self.device.name))
def isconnected(func):
"""Decorator to make sure session to device is active
There is limitation on the amount of time the session can be active
on the ND. However, there is no way to verify if
session is still active unless sending a command. So, it's just
faster to reconnect every time.
"""
def decorated(self, *args, **kwargs):
try:
ret = func(self, *args, **kwargs)
except:
self.disconnect()
if 'timeout' in kwargs:
self.connect(timeout=kwargs['timeout'])
else:
self.connect()
ret = func(self, *args, **kwargs)
return ret
return decorated
    @BaseConnection.locked
    @isconnected
    def get(self, api_url, expected_status_code=requests.codes.ok,
            timeout=30, retries=3, retry_wait=10):
        """GET REST Command to retrieve information from the device
        Arguments
        ---------
        api_url (string): subdirectory part of the API URL
        expected_status_code (int): Expected result
        timeout (int): Maximum time
        retries (int): Number of retries in case of transmission error
        retry_wait (int): Seconds to wait between retries
        Returns
        -------
        dict|str: decoded JSON body, or the raw response text when the
        body is not valid JSON
        Raises
        ------
        Exception: if the device is not connected for this alias
        RequestException: if every retry raised, or the response status
        code differs from expected_status_code
        """
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Eliminate the starting "/" if present, as it may cause problems
        api_url = api_url.lstrip('/')
        # Deal with the url
        full_url = "{f}{api_url}".format(f=self.url, api_url=api_url)
        log.info("Sending GET command to '{d}':" \
            "\nURL: {furl}".format(d=self.device.name, furl=full_url))
        for _ in range(retries):
            try:
                response = self.session.get(full_url, timeout=timeout, verify=False)
                break
            except Exception:
                log.warning('Request to {} failed. Waiting {} seconds before retrying\n'.format(
                    self.device.name, retry_wait), exc_info=True)
                time.sleep(retry_wait)
        else:
            # for/else: reached only when no attempt broke out of the
            # loop, i.e. every retry raised
            raise RequestException('Sending "{furl}" to "{d}" has failed '
                                   'after {tries} tries'.format(furl=full_url,
                                                                d=self.device.name,
                                                                tries=retries))
        try:
            output = response.json()
        except Exception:
            # Body is not valid JSON; fall back to the raw text
            output = response.text
        log.info("Output received:\n{output}".format(output=
            json.dumps(output, indent=2, sort_keys=True)))
        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("GET {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output
    @BaseConnection.locked
    @isconnected
    def post(self, api_url, payload, expected_status_code=requests.codes.ok,
             content_type='json', timeout=30, retries=3, retry_wait=10):
        """POST REST Command to configure information from the device
        Arguments
        ---------
        api_url (string): subdirectory part of the API URL
        payload (dict|string): Information to send via the post
        expected_status_code (int): Expected result
        content_type(string): json / xml / form
        timeout (int): Maximum time
        retries (int): Number of retries in case of transmission error
        retry_wait (int): Seconds to wait between retries
        Returns
        -------
        dict|str|bytes: decoded JSON body when possible, otherwise the
        raw response content (xml) or text
        Raises
        ------
        Exception: if the device is not connected for this alias
        ValueError: if content_type is 'xml' but payload is a dict
        RequestException: if every retry raised, or the response status
        code differs from expected_status_code
        """
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Eliminate the starting "/" if present, as it may cause problems
        api_url = api_url.lstrip('/')
        # Deal with the url
        full_url = '{f}{api_url}'.format(f=self.url, api_url=api_url)
        log.info("Sending POST command to '{d}':" \
            "\nURL: {furl}\nPayload:{p}".format(d=self.device.name,
                                                furl=full_url,
                                                p=payload))
        # Map the content_type keyword to the matching Content-type header
        headers = {'form': 'application/x-www-form-urlencoded',
                   'json': 'application/json',
                   'xml': 'application/xml'}
        if content_type == 'form' and isinstance(payload, dict):
            payload = urllib.parse.urlencode(payload, safe=':!')
        elif content_type == 'xml':
            if isinstance(payload, dict):
                raise ValueError("Error on {d} during POST command: "
                                 "Payload needs to be string in xml format if used "
                                 "in conjunction with content_type='xml' argument"
                                 .format(d=self.device.name))
        for _ in range(retries):
            try:
                # Send to the device
                if isinstance(payload, dict):
                    response = self.session.post(full_url, json=payload, timeout=timeout,
                                                 verify=False)
                else:
                    response = self.session.post(full_url, data=payload, timeout=timeout,
                                                 verify=False,
                                                 headers={'Content-type': headers.get(content_type,
                                                                                      headers['json'])})
                break
            except Exception:
                log.warning('Request to {} failed. Waiting {} seconds before retrying\n'.format(
                    self.device.name, retry_wait), exc_info=True)
                time.sleep(retry_wait)
        else:
            # for/else: reached only when every retry raised
            raise RequestException('Sending "{furl}" to "{d}" has failed '
                                   'after {tries} tries'.format(furl=full_url,
                                                                d=self.device.name,
                                                                tries=retries))
        try:
            # response might not be in JSON format
            output = response.json()
            log.info("Output received:\n{output}".format(output=output))
        except Exception:
            output = response.content if content_type == 'xml' else response.text
            log.info(f"'Post' operation did not return a json response: {output}")
        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("POST {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output
    @BaseConnection.locked
    @isconnected
    def put(self, api_url, payload=None, expected_status_code=requests.codes.ok,
            content_type='json', timeout=30, retries=3, retry_wait=10):
        """PUT REST Command to configure information from the device
        Arguments
        ---------
        api_url (string): subdirectory part of the API URL
        payload (dict|string): Information to send via the put action
        expected_status_code (int): Expected result
        content_type(string): json / xml / form
        timeout (int): Maximum time
        retries (int): Number of retries in case of transmission error
        retry_wait (int): Seconds to wait between retries
        Returns
        -------
        dict|str|bytes: decoded JSON body when possible, otherwise the
        raw response content (xml) or text
        Raises
        ------
        Exception: if the device is not connected for this alias
        ValueError: if content_type is 'xml' but payload is a dict
        RequestException: if every retry raised, or the response status
        code differs from expected_status_code
        """
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Eliminate the starting "/" if present, as it may cause problems
        api_url = api_url.lstrip('/')
        # Deal with the url
        full_url = '{f}{api_url}'.format(f=self.url, api_url=api_url)
        log.info("Sending PUT command to '{d}':" \
            "\nURL: {furl}\nPayload:{p}".format(d=self.device.name,
                                                furl=full_url,
                                                p=payload))
        # Map the content_type keyword to the matching Content-type header
        headers = {'form': 'application/x-www-form-urlencoded',
                   'json': 'application/json',
                   'xml': 'application/xml'}
        if content_type == 'form' and isinstance(payload, dict):
            payload = urllib.parse.urlencode(payload, safe=':!')
        elif content_type == 'xml':
            if isinstance(payload, dict):
                raise ValueError("Error on {d} during PUT command: "
                                 "Payload must be string in xml format if used "
                                 "in conjunction with content_type='xml' argument"
                                 .format(d=self.device.name))
        for _ in range(retries):
            try:
                # Send to the device
                if isinstance(payload, dict):
                    response = self.session.put(full_url, json=payload, timeout=timeout,
                                                verify=False)
                else:
                    response = self.session.put(full_url, data=payload, timeout=timeout,
                                                verify=False,
                                                headers={'Content-type': headers.get(content_type,
                                                                                     headers['json'])})
                break
            except Exception:
                log.warning('Request to {} failed. Waiting {} seconds before retrying\n'.format(
                    self.device.name, retry_wait), exc_info=True)
                time.sleep(retry_wait)
        else:
            # for/else: reached only when every retry raised
            raise RequestException('Sending "{furl}" to "{d}" has failed '
                                   'after {tries} tries'.format(furl=full_url,
                                                                d=self.device.name,
                                                                tries=retries))
        try:
            # response might not be in JSON format
            output = response.json()
            log.info("Output received:\n{output}".format(output=output))
        except Exception:
            output = response.content if content_type == 'xml' else response.text
            log.info(f"'Put' operation did not return a json response: {output}")
        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("PUT {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output
@BaseConnection.locked
@isconnected
def delete(self, api_url, expected_status_code=requests.codes.ok,
timeout=30, retries=3, retry_wait=10):
"""DELETE REST Command to delete information from the device
Arguments
---------
api_url (string): subdirectory part of the API URL
expected_status_code (int): Expected result
timeout (int): Maximum time
"""
if not self.connected:
raise Exception("'{d}' is not connected for "
"alias '{a}'".format(d=self.device.name,
a=self.alias))
# Eliminate the starting "/" if present, as it may cause problems
api_url = api_url.lstrip('/')
# Deal with the url
full_url = '{f}{api_url}'.format(f=self.url, api_url=api_url)
log.info("Sending DELETE command to '{d}':" \
"\nURL: {furl}".format(d=self.device.name, furl=full_url))
for i in range(retries):
try:
# Send to the device
response = self.session.delete(full_url, timeout=timeout, verify=False)
break
except Exception:
log.warning('Request to {} failed. Waiting {} seconds before retrying\n'.format(
self.device.name, retry_wait), exc_info=True)
time.sleep(retry_wait)
else:
raise RequestException('Sending "{furl}" to "{d}" has failed '
'after {tries} tries'.format(furl=full_url,
d=self.device.name,
tries=retries))
try:
# response might not pe in JSON format
output = response.json()
log.info("Output received:\n{output}".format(output=output))
except ValueError:
output = response.text
log.info(f"'Delete' operation did not return a json response: {output}")
# Make sure it returned requests.codes.ok
if response.status_code != expected_status_code:
# Something bad happened
raise RequestException("DELETE {furl} to {d} has returned the "
"following code '{c}', instead of the "
"expected status code '{e}'"
", got:\n {msg}".format(furl=full_url,
d=self.device.name,
c=response.status_code,
e=expected_status_code,
msg=response.text))
return output | /rest.connector-23.8-py3-none-any.whl/rest/connector/libs/nd/implementation.py | 0.657868 | 0.154631 | implementation.py | pypi |
import time
import json
import logging
import requests
from requests.exceptions import RequestException
from pyats.connections import BaseConnection
from rest.connector.implementation import Implementation as Imp
from rest.connector.utils import get_username_password
# create a logger for this module
log = logging.getLogger(__name__)
class Implementation(Imp):
    '''Rest Implementation for APIC

    Implementation of Rest connection to devices based on pyATS
    BaseConnection for APIC

    YAML Example
    ------------
        devices:
            apic1:
                connections:
                    rest:
                        class: rest.connector.Rest
                        ip : "2.3.4.5"
                        credentials:
                            rest:
                                username: admin
                                password: cisco123

    Code Example
    ------------
        >>> from pyats.topology import loader
        >>> testbed = loader.load('/users/xxx/xxx/testbed.yaml')
        >>> device = testbed.devices['apic1']
        >>> device.connect(alias='rest', via='rest')
        >>> device.rest.connected
        True
    '''

    @BaseConnection.locked
    def connect(self, timeout=30, retries=3, retry_wait=10):
        '''connect to the device via REST

        Arguments
        ---------
        timeout (int): Timeout value
        retries (int): Max retries on request exception (default: 3)
        retry_wait (int): Seconds to wait before retry (default: 10)

        Raises
        ------
        ConnectionError: if the connection did not go well after all retries

        Note
        ----
        There is no return from this method. If something goes wrong, an
        exception will be raised.
        '''
        if self.connected:
            return
        # Prefer the 'host' key; fall back to the resolved IP address
        if 'host' in self.connection_info:
            ip = self.connection_info['host']
        else:
            ip = self.connection_info['ip'].exploded
        if 'port' in self.connection_info:
            port = self.connection_info['port']
            self.url = 'https://{ip}:{port}/'.format(ip=ip, port=port)
        else:
            self.url = 'https://{ip}/'.format(ip=ip)
        login_url = '{f}api/aaaLogin.json'.format(f=self.url)
        username, password = get_username_password(self)
        payload = {
            "aaaUser": {
                "attributes": {
                    "name": username,
                    "pwd": password,
                }
            }
        }
        headers = {
            'Content-Type': 'text/plain'
        }
        log.info("Connecting to '{d}' with alias "
                 "'{a}'".format(d=self.device.name, a=self.alias))
        self.session = requests.Session()
        _data = json.dumps(payload)
        for _ in range(retries):
            try:
                # Connect to the device via requests
                response = self.session.post(login_url, data=_data, timeout=timeout,
                                             verify=False, headers=headers)
                log.info(response)
                # Make sure it returned requests.codes.ok
                if response.status_code != requests.codes.ok:
                    log.error(response.text)
                    # Something bad happened
                    raise RequestException("Connection to '{ip}' has returned the "
                                           "following code '{c}', instead of the "
                                           "expected status code '{ok}'"
                                           .format(ip=ip, c=response.status_code,
                                                   ok=requests.codes.ok))
                break
            except Exception:
                log.warning('Request to {} failed. Waiting {} seconds before retrying\n'.format(
                    self.device.name, retry_wait), exc_info=True)
                time.sleep(retry_wait)
        else:
            # for/else: reached only when every retry raised
            raise ConnectionError('Connection to {} failed'.format(self.device.name))
        self._is_connected = True
        log.info("Connected successfully to '{d}'".format(d=self.device.name))

    @BaseConnection.locked
    def disconnect(self):
        '''disconnect the device for this particular alias'''
        log.info("Disconnecting from '{d}' with "
                 "alias '{a}'".format(d=self.device.name, a=self.alias))
        try:
            self.session.close()
        finally:
            # Always clear the flag, even if close() raised
            self._is_connected = False
            log.info("Disconnected successfully from "
                     "'{d}'".format(d=self.device.name))

    def isconnected(func):
        '''Decorator to make sure the session to the device is active

        There is a limitation on the amount of time the session can be
        active on the APIC. However, there is no way to verify if the
        session is still active unless sending a command. So, it's just
        faster to reconnect every time.
        '''
        def decorated(self, *args, **kwargs):
            try:
                ret = func(self, *args, **kwargs)
            except Exception:
                # Was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; narrow to Exception.
                # Session most likely expired: reconnect once and retry.
                self.disconnect()
                if 'timeout' in kwargs:
                    self.connect(timeout=kwargs['timeout'])
                else:
                    self.connect()
                ret = func(self, *args, **kwargs)
            return ret
        return decorated

    @BaseConnection.locked
    @isconnected
    def get(self, dn, query_target='self', rsp_subtree='no', \
            query_target_filter='', rsp_prop_include='all', \
            rsp_subtree_include='', rsp_subtree_class='',\
            target_subtree_class='', order_by='', \
            expected_status_code=requests.codes.ok, timeout=30):
        '''GET REST Command to retrieve information from the device

        Arguments
        ---------
        dn (string): Unique distinguished name that describes the object
                     and its place in the tree.
        query_target {self|children|subtree}:
            'self': (default) MO itself
            'children': just the MO's child objects
            'subtree': MO and its child objects
        rsp_subtree {no|children|full}: Specifies child object level
                                        included in the response
            'no': (default) the response does not include any children
            'children': return only the child objects
            'full': includes the full tree structure
        rsp_prop_include {all|naming-only|config-only}:
            'all': all properties of the objects
            'naming-only': only the naming properties
            'config-only': only configurable properties
        rsp_subtree_include (string): specify additional contained objects
                                      or options to be included
        rsp_subtree_class (string): specify classes
        target_subtree_class (string): specify subtree classes
        query_target_filter (string): filter expression
        order_by (string): sort the query response by one or
                           more properties of a class
        expected_status_code (int): Expected result
        timeout (int): Maximum time
        '''
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Mandatory query parameters are always present in the URL
        full_url = "{f}{dn}?query-target={qt}&rsp-subtree={rs}"\
                   "&rsp-prop-include={rpi}"\
                   .format(f=self.url,
                           dn=dn,
                           qt=query_target,
                           rs=rsp_subtree,
                           rpi=rsp_prop_include)
        # Optional parameters are appended only when provided
        if query_target_filter:
            full_url += "&query-target-filter={qtf}"\
                .format(qtf=query_target_filter)
        if rsp_subtree_include:
            full_url += "&rsp-subtree-include={rsi}"\
                .format(rsi=rsp_subtree_include)
        if rsp_subtree_class:
            full_url += "&rsp-subtree-class={rsc}"\
                .format(rsc=rsp_subtree_class)
        if target_subtree_class:
            full_url += "&target-subtree-class={tsc}"\
                .format(tsc=target_subtree_class)
        if order_by:
            full_url += "&order-by={ob}"\
                .format(ob=order_by)
        log.info("Sending GET command to '{d}':"\
                 "\nDN: {furl}".format(d=self.device.name, furl=full_url))
        response = self.session.get(full_url, timeout=timeout, verify=False)
        try:
            output = response.json()
        except Exception:
            # Body is not valid JSON; fall back to the raw text
            output = response.text
        log.info("Output received:\n{output}".format(output=
            json.dumps(output, indent=2, sort_keys=True)))
        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("GET {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output

    @BaseConnection.locked
    @isconnected
    def post(self, dn, payload, xml_payload=False,
             expected_status_code=requests.codes.ok, timeout=30):
        '''POST REST Command to configure information from the device

        Arguments
        ---------
        dn (string): Unique distinguished name that describes the object
                     and its place in the tree.
        payload (dict|string): Information to send via the post command
        xml_payload (bool): Set to True if payload is in XML format
        expected_status_code (int): Expected result
        timeout (int): Maximum time
        '''
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Deal with the dn
        full_url = '{f}{dn}'.format(f=self.url, dn=dn)
        log.info("Sending POST command to '{d}':"\
                 "\nDN: {furl}\nPayload:{p}".format(d=self.device.name,
                                                    furl=full_url,
                                                    p=payload))
        # Send to the device
        if xml_payload:
            if isinstance(payload, dict):
                raise ValueError("Error on {d} during POST command: "
                                 "Payload needs to be string in xml format if "
                                 "used in conjunction with xml_payload argument"
                                 .format(d=self.device.name))
            response = self.session.post(full_url, data=payload, timeout=timeout,
                                         verify=False,
                                         headers={'Content-type': 'application/xml'})
            # XML responses are returned as raw bytes
            output = response.content
        else:
            if isinstance(payload, dict):
                response = self.session.post(full_url, json=payload, timeout=timeout,
                                             verify=False)
            else:
                response = self.session.post(full_url, data=payload, timeout=timeout,
                                             verify=False,
                                             headers={'Content-type': 'application/json'})
            output = response.json()
        log.info("Output received:\n{output}".format(output=output))
        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("POST {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output

    @BaseConnection.locked
    @isconnected
    def delete(self, dn, expected_status_code=requests.codes.ok, timeout=30):
        '''DELETE REST Command to delete information from the device

        Arguments
        ---------
        dn (string): Unique distinguished name that describes the object
                     and its place in the tree.
        expected_status_code (int): Expected result
        timeout (int): Maximum time
        '''
        if not self.connected:
            raise Exception("'{d}' is not connected for "
                            "alias '{a}'".format(d=self.device.name,
                                                 a=self.alias))
        # Deal with the dn
        full_url = '{f}{dn}'.format(f=self.url, dn=dn)
        log.info("Sending DELETE command to '{d}':"\
                 "\nDN: {furl}".format(d=self.device.name, furl=full_url))
        # Send to the device
        response = self.session.delete(full_url, timeout=timeout, verify=False)
        output = response.json()
        log.info("Output received:\n{output}".format(output=output))
        # Make sure it returned requests.codes.ok
        if response.status_code != expected_status_code:
            # Something bad happened
            raise RequestException("DELETE {furl} to {d} has returned the "
                                   "following code '{c}', instead of the "
                                   "expected status code '{e}'"
                                   ", got:\n {msg}".format(furl=full_url,
                                                           d=self.device.name,
                                                           c=response.status_code,
                                                           e=expected_status_code,
                                                           msg=response.text))
        return output
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
class DynModelSerializer(serializers.ModelSerializer):
    """
    ModelSerializer that includes/excludes fields dynamically, driven by a
    comma-separated query parameter (named by Meta.fields_param).
    """
    default_error_messages = {
        'does_not_exist': 'Invalid pk "{pk_value}" - object does not exist.',
        'incorrect_type': 'Incorrect type. Expected pk value, received {data_type}.',
    }

    def __init__(self, *args, **kwargs):
        self._requested_fields = []
        s_type = type(self)
        assert hasattr(self.Meta, 'model'), '{} Meta.model param is required'.format(s_type)
        assert hasattr(self.Meta, 'fields_param'), \
            '{} Meta.fields_param param cannot be empty'.format(s_type)
        self.nested = kwargs.pop('nested', False)
        self.default_fields = list(getattr(self.Meta, 'default_fields', ['id']))
        self.limit_fields = kwargs.pop('limit_fields', getattr(self.Meta, 'limit_fields', False))
        self.set_allowed_fields(kwargs.pop('fields', None))
        for field_name in self.default_fields:
            # BUGFIX: the two message halves used to concatenate without a
            # separating space ('...field "x"not in...').
            assert field_name in self._allowed_fields, \
                '{} Meta.default_fields contains field "{}" ' \
                'not in Meta.fields list'.format(s_type, field_name)
        super(DynModelSerializer, self).__init__(*args, **kwargs)
        if self.limit_fields:
            request = self.get_request()
            if request:
                # don't limit fields for write operations
                if request.method == 'GET':
                    self.exclude_omitted_fields(request)
                    # BUGFIX: was `for field_name, field_name in ...`, which
                    # shadowed the key with the value and only worked by
                    # accident; unpack into two distinct names.
                    for field_name, field in self.fields.items():
                        # assigning parent context to allow child serializers
                        # to update their fields later
                        field._context = self.context
                else:
                    self.limit_fields = False
                    self.request_all_allowed_fields()
            else:
                self.request_all_allowed_fields()
        else:
            self.request_all_allowed_fields()

    def get_value(self, data):
        # For nested use, take the raw value straight from the payload.
        if not self.nested or self.field_name not in data:
            return super().get_value(data)
        return data[self.field_name]

    def to_internal_value(self, data):
        """
        Allow pass value of nested field, assume that passed value is PK
        """
        if not self.nested:
            return super().to_internal_value(data)
        try:
            return self.Meta.model.objects.get(pk=data)
        except ObjectDoesNotExist:
            self.fail('does_not_exist', pk_value=data)
        except (TypeError, ValueError):
            self.fail('incorrect_type', data_type=type(data).__name__)

    def request_all_allowed_fields(self):
        # Mark every allowed field as requested (no client-side filtering).
        for field in self._allowed_fields:
            self._requested_fields.append(field)

    def get_request(self):
        return self.context.get('request')

    def set_allowed_fields(self, fields=None):
        # Build the whitelist from Meta.fields (or the model's own fields),
        # intersected with the caller-supplied list and minus Meta.exclude.
        if hasattr(self.Meta, 'fields'):
            meta_fields = list(self.Meta.fields)
        else:
            meta_fields = []
            for field_obj in self.Meta.model._meta.get_fields():
                meta_fields.append(field_obj.name)
        include = meta_fields if not fields else [
            field for field in meta_fields if field in fields]
        exclude = set(getattr(self.Meta, 'exclude', []))
        self._allowed_fields = list(set(include) - exclude)

    def exclude_omitted_fields(self, request):
        field_names = self.get_requested_field_names(request)
        self._requested_fields = field_names
        if field_names is not None:
            # Drop any fields that are not specified in passed query param
            allowed = set(field_names)
            existing = set(self.fields.keys())
            for field_name in existing - allowed:
                self.fields.pop(field_name)
        # Recurse into nested dynamic serializers so they filter too.
        for field_name in self.fields:
            field = self.fields[field_name]
            if isinstance(field, serializers.ListSerializer):
                if isinstance(field.child, DynModelSerializer):
                    field.child.exclude_omitted_fields(request)
            elif isinstance(field, DynModelSerializer):
                field.exclude_omitted_fields(request)

    def get_requested_field_names(self, request):
        fields_param_value = request.query_params.get(self.Meta.fields_param)
        if fields_param_value is not None:
            requested_fields = fields_param_value.split(',')
            if requested_fields:
                return list(set(self._allowed_fields).intersection(set(requested_fields)))
        return list(self.default_fields)

    def is_field_requested(self, field_name):
        """
        Return True if the field requested by client
        """
        if self.limit_fields:
            request = self.get_request()
            assert request, "request can't be None in limit_fields mode"
            requested_fields = self.get_requested_field_names(request)
            return field_name in requested_fields
        else:
            # always return field if limit_fields flag set to False
            return True

    def get_field_names(self, declared_fields, info):
        """
        Return only requested and allowed field names
        """
        return self._requested_fields

    class Meta:
        model = None
        fields = []
        default_fields = []
        fields_param = None
from django.contrib.auth import models as auth_models
from rest_framework import viewsets, generics, renderers, parsers
from rest_framework.decorators import list_route, detail_route
from rest_framework.response import Response
from rest_framework_ember import mixins, utils
from ..serializers.identity import IdentitySerializer
from ..serializers.post import PostSerializer
class Identity(mixins.MultipleIDMixin, viewsets.ModelViewSet):
    """Viewset exposing auth users as the 'identities' resource."""
    queryset = auth_models.User.objects.all()
    serializer_class = IdentitySerializer

    @list_route()
    def empty_list(self, request):
        """
        This is a hack/workaround to return an empty result on a list
        endpoint because the delete operation in the test_empty_pluralization
        test doesn't prevent the /identities endpoint from still returning
        records when called in the same test. Suggestions welcome.
        """
        # pk=None matches nothing, so the list is guaranteed empty.
        self.queryset = self.queryset.filter(pk=None)
        return super(Identity, self).list(request)

    # demonstrate sideloading data for use at app boot time
    @list_route()
    def posts(self, request):
        self.resource_name = False
        identities = self.queryset
        posts = [{'id': 1, 'title': 'Test Blog Post'}]
        data = {
            u'identities': IdentitySerializer(identities, many=True).data,
            u'posts': PostSerializer(posts, many=True).data,
        }
        return Response(utils.format_keys(data, format_type='camelize'))

    @detail_route()
    def manual_resource_name(self, request, *args, **kwargs):
        self.resource_name = 'data'
        # BUGFIX: args/kwargs must be unpacked; previously the tuple and
        # dict were passed as two positional arguments to retrieve().
        return super(Identity, self).retrieve(request, *args, **kwargs)
class GenericIdentity(generics.GenericAPIView):
    """
    An endpoint that uses DRF's default format so we can test that.
    GET /identities/generic
    """
    serializer_class = IdentitySerializer
    allowed_methods = ['GET']
    renderer_classes = (renderers.JSONRenderer, )
    parser_classes = (parsers.JSONParser, )

    def get_queryset(self):
        # All users; get_object() applies the pk lookup on top of this.
        return auth_models.User.objects.all()

    def get(self, request, pk=None):
        """
        GET request: return the serialized user identified by pk.
        """
        obj = self.get_object()
        return Response(IdentitySerializer(obj).data)
class Node:
    """A single node of a URL path tree.

    Each node may carry an endpoint (the resource class bound to the path
    ending here) and any number of named children. An object is Node-like
    when it offers compatible add() and get() methods and, where needed,
    an ``endpoint`` attribute.

    Attributes:
        endpoint: the resource class associated with the current path, or None
    """

    def __init__ (self):
        self.children = {}
        self.endpoint = None
        self.varname = None
        self.variable = None

    def add (self, name, child=None):
        """Attach a subtree under ``name`` and return it.

        Args:
            name: path segment for the new subtree; an angle-bracketed
                name such as ``"<id>"`` declares a path variable
            child: Node-like object to attach; a fresh Node is created
                when omitted

        Returns:
            the attached child object

        Raises:
            RuntimeError: a path variable was already added to this node
            ValueError: ``name`` already identifies an existing subtree
        """
        if name.startswith("<") and name.endswith(">"):
            if self.variable is not None:
                raise RuntimeError("Attempted to add path variable twice")
            self.varname = name[1:-1]
            self.variable = Node() if child is None else child
            return self.variable
        if name in self.children:
            raise ValueError("name already in use: \"{}\"".format(name))
        node = Node() if child is None else child
        self.children[name] = node
        return node

    def get (self, name):
        """Look up the subtree matching ``name``.

        Args:
            name: the path segment (or ``"<var>"``) to resolve

        Returns:
            the matching subtree; a Variable wrapper when ``name`` is
            consumed by this node's path variable; or None when nothing
            matches

        Raises:
            ValueError: an angle-bracketed name disagrees with the
                variable name already registered on this node
        """
        if name.startswith("<") and name.endswith(">"):
            inner = name[1:-1]
            if self.varname is not None and inner != self.varname:
                msg = "Path variable names do not match: \"{}\", \"{}\""
                raise ValueError(msg.format(self.varname, inner))
            return self.variable
        node = self.children.get(name)
        if node is not None:
            return node
        if self.variable is not None:
            return Variable(self.varname, name, self.variable)
class Variable:
    """Wrapper for a Node object reached by traversing a path variable.

    Usage of this object is identical to that of Node. They are
    distinguished by the presence of the "value" attribute.

    Attributes:
        name: the angle-bracket-enclosed variable name given to define the
            path that led to this variable (angle brackets have been removed)
        value: the value of this variable, extracted from the requested URL
        node: the wrapped Node
    """
    def __init__ (self, name, value, node):
        self.name = name
        self.value = value
        self.node = node

    def add (self, *args, **kwargs):
        """Delegate to the wrapped node's add()."""
        return self.node.add(*args, **kwargs)

    def get (self, *args, **kwargs):
        """Delegate to the wrapped node's get()."""
        return self.node.get(*args, **kwargs)

    @property
    def endpoint (self):
        """Expose the wrapped node's endpoint."""
        return self.node.endpoint
import re
import sys
import datetime
import calendar
import email.utils as eut
from time import mktime
import jsonpickle
import dateutil.parser
from requests.utils import quote
class APIHelper(object):
    """A Helper Class for various functions associated with API Calls.

    This class contains static methods for operations that need to be
    performed during API requests. All of the methods inside this class are
    static methods, there is no need to ever initialise an instance of this
    class.

    NOTE(review): the methods below reference ``sys``, ``re``, ``datetime``,
    ``calendar`` and ``eut`` (presumably ``email.utils``) that are not among
    the imports visible at the top of this module chunk -- confirm they are
    imported elsewhere in this module.
    """

    @staticmethod
    def merge_dicts(dict1, dict2):
        """Merges two dictionaries into one as a shallow copy.

        Args:
            dict1 (dict): The first dictionary.
            dict2 (dict): The second dictionary.

        Returns:
            dict: A dictionary containing key value pairs
                from both the argument dictionaries. In the case
                of a key conflict, values from dict2 are used
                and those from dict1 are lost.
        """
        temp = dict1.copy()
        temp.update(dict2)
        return temp

    @staticmethod
    def json_serialize(obj):
        """JSON Serialization of a given object.

        Args:
            obj (object): The object to serialize. SDK model instances
                (anything exposing a ``_names`` mapping) are converted to
                plain dictionaries first, element-wise for lists.

        Returns:
            str: The JSON serialized string of the object, or None when
                *obj* is None.
        """
        if obj is None:
            return None

        # Resolve any Names if it's one of our objects that needs to have this called on
        if isinstance(obj, list):
            value = list()
            for item in obj:
                if hasattr(item, "_names"):
                    value.append(APIHelper.to_dictionary(item))
                else:
                    value.append(item)
            obj = value
        else:
            if hasattr(obj, "_names"):
                obj = APIHelper.to_dictionary(obj)

        # Second argument (unpicklable=False) suppresses jsonpickle's
        # py/object type tags so the output is plain JSON.
        return jsonpickle.encode(obj, False)

    @staticmethod
    def json_deserialize(json, unboxing_function=None, as_dict=False):
        """JSON Deserialization of a given string.

        Args:
            json (str): The JSON serialized string to deserialize.
            unboxing_function (callable, optional): Applied to each decoded
                element (or dict value) to turn plain dicts into models.
            as_dict (bool, optional): When True, apply the unboxing
                function to every value of the decoded dictionary.

        Returns:
            dict: A dictionary representing the data contained in the
                JSON serialized string. The raw input is returned unchanged
                when it is not valid JSON.
        """
        if json is None:
            return None

        try:
            decoded = jsonpickle.decode(json)
        except ValueError:
            # Not JSON at all -- hand the raw payload back to the caller.
            return json

        if unboxing_function is None:
            return decoded

        if as_dict:
            return {k: unboxing_function(v) for k, v in decoded.items()}
        elif isinstance(decoded, list):
            return [unboxing_function(element) for element in decoded]
        else:
            return unboxing_function(decoded)

    @staticmethod
    def get_content_type(value):
        """Get content type header for oneof.

        Args:
            value: The value passed by the user.

        Returns:
            str: ``text/plain`` for primitive values, ``application/json``
                otherwise; None when *value* is None.
        """
        if value is None:
            return None
        primitive = (int, str, bool, float)

        # Exact type comparison (not isinstance): subclasses of primitives
        # deliberately fall through to the JSON content type.
        if type(value) in primitive:
            return 'text/plain; charset=utf-8'
        else:
            return 'application/json; charset=utf-8'

    @staticmethod
    def get_schema_path(path):
        """Return the Schema's path.

        Maps a model module path (``.../models/foo.py``) to its JSON schema
        path (``.../schemas/foo.json``), handling both Windows and POSIX
        separators.

        Returns:
            string : returns Correct schema path
        """
        path = path.replace('\\models', '\\schemas').replace('/models', '/schemas').replace(".py", ".json")
        return path

    @staticmethod
    def serialize_array(key, array, formatting="indexed"):
        """Converts an array parameter to a list of key value tuples.

        Args:
            key (str): The name of the parameter.
            array (list): The value of the parameter. Must be non-empty:
                the first element is inspected to choose the strategy
                (an empty list raises IndexError).
            formatting (str): The type of key formatting expected:
                "indexed" (key[0]=v), "unindexed" (key[]=v) or
                "plain" (key=v).

        Returns:
            list: A list with key value tuples for the array elements.

        Raises:
            ValueError: If *formatting* is not a supported mode (only for
                arrays of serializable primitive elements).
        """
        tuples = []

        # NOTE(review): the Python 2 branch references the builtin `long`,
        # which does not exist on Python 3 -- dead code there.
        if sys.version_info[0] < 3:
            serializable_types = (str, int, long, float, bool, datetime.date, APIHelper.CustomDate)
        else:
            serializable_types = (str, int, float, bool, datetime.date, APIHelper.CustomDate)

        if isinstance(array[0], serializable_types):
            if formatting == "unindexed":
                tuples += [("{0}[]".format(key), element) for element in array]
            elif formatting == "indexed":
                tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
            elif formatting == "plain":
                tuples += [(key, element) for element in array]
            else:
                raise ValueError("Invalid format provided.")
        else:
            # Non-primitive elements always use indexed formatting.
            tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]

        return tuples

    @staticmethod
    def append_url_with_template_parameters(url, parameters):
        """Replaces template parameters in the given url.

        Args:
            url (str): The query url string to replace the template parameters.
            parameters (dict): The parameters to replace in the url. Each
                entry is a dict with ``value`` and ``encode`` keys; when
                ``encode`` is truthy the value is percent-encoded.

        Returns:
            str: URL with replaced parameters.

        Raises:
            ValueError: If *url* is None.
        """
        # Parameter validation
        if url is None:
            raise ValueError("URL is None.")
        if parameters is None:
            return url

        # Iterate and replace parameters
        for key in parameters:
            value = parameters[key]['value']
            encode = parameters[key]['encode']
            replace_value = ''

            # Load parameter value
            if value is None:
                replace_value = ''
            elif isinstance(value, list):
                # List values become slash-separated path segments.
                replace_value = "/".join((quote(str(x), safe='') if encode else str(x)) for x in value)
            else:
                replace_value = quote(str(value), safe='') if encode else str(value)

            url = url.replace('{{{0}}}'.format(key), str(replace_value))

        return url

    @staticmethod
    def append_url_with_query_parameters(url,
                                         parameters,
                                         array_serialization="plain"):
        """Adds query parameters to a URL.

        Args:
            url (str): The URL string.
            parameters (dict): The query parameters to add to the URL.
            array_serialization (str): The format of array parameter
                serialization ("csv", "psv", "tsv", or any mode accepted
                by serialize_array).

        Returns:
            str: URL with added query parameters.

        Raises:
            ValueError: If *url* is None.
        """
        # Parameter validation
        if url is None:
            raise ValueError("URL is None.")
        if parameters is None:
            return url

        for key, value in parameters.items():
            seperator = '&' if '?' in url else '?'
            if value is not None:
                if isinstance(value, list):
                    # NOTE(review): this drops ALL falsy elements (0, '',
                    # False), not just None -- confirm intended.
                    value = [element for element in value if element]
                    if array_serialization == "csv":
                        url += "{0}{1}={2}".format(
                            seperator,
                            key,
                            ",".join(quote(str(x), safe='') for x in value)
                        )
                    elif array_serialization == "psv":
                        url += "{0}{1}={2}".format(
                            seperator,
                            key,
                            "|".join(quote(str(x), safe='') for x in value)
                        )
                    elif array_serialization == "tsv":
                        url += "{0}{1}={2}".format(
                            seperator,
                            key,
                            "\t".join(quote(str(x), safe='') for x in value)
                        )
                    else:
                        url += "{0}{1}".format(
                            seperator,
                            "&".join(("{0}={1}".format(k, quote(str(v), safe='')))
                                     for k, v in APIHelper.serialize_array(key, value, array_serialization))
                        )
                else:
                    url += "{0}{1}={2}".format(seperator, key, quote(str(value), safe=''))

        return url

    @staticmethod
    def clean_url(url):
        """Validates and processes the given query Url to clean empty slashes.

        Args:
            url (str): The given query Url to process.

        Returns:
            str: Clean Url as string.

        Raises:
            ValueError: If *url* is not an absolute http(s) URL.
        """
        # Ensure that the urls are absolute
        regex = "^https?://[^/]+"
        match = re.match(regex, url)
        if match is None:
            raise ValueError('Invalid Url format.')

        protocol = match.group(0)
        index = url.find('?')
        # Collapse duplicate slashes only in the path portion -- never in
        # the protocol prefix or the query string.
        query_url = url[len(protocol): index if index != -1 else None]
        query_url = re.sub("//+", "/", query_url)
        parameters = url[index:] if index != -1 else ""

        return protocol + query_url + parameters

    @staticmethod
    def form_encode_parameters(form_parameters,
                               array_serialization="plain"):
        """Form encodes a dictionary of form parameters.

        Args:
            form_parameters (dictionary): The given dictionary which has
                atleast one model to form encode.
            array_serialization (str): The format of array parameter
                serialization.

        Returns:
            list: A list of (key, value) tuples for all encoded properties.
        """
        encoded = []

        for key, value in form_parameters.items():
            encoded += APIHelper.form_encode(value, key, array_serialization)

        return encoded

    @staticmethod
    def form_encode(obj,
                    instance_name,
                    array_serialization="indexed"):
        """Encodes a model in a form-encoded manner such as person[Name].

        Args:
            obj (object): The given Object to form encode.
            instance_name (string): The base name to appear before each entry
                for this object.
            array_serialization (string): The format of array parameter
                serialization.

        Returns:
            list: A list of (key, value) tuples; recursion flattens nested
                models, dicts and lists into bracketed key paths.
        """
        retval = []

        # If we received an object, resolve it's field names.
        if hasattr(obj, "_names"):
            obj = APIHelper.to_dictionary(obj)

        if obj is None:
            return []
        elif isinstance(obj, list):
            for element in APIHelper.serialize_array(instance_name, obj, array_serialization):
                retval += APIHelper.form_encode(element[1], element[0], array_serialization)
        elif isinstance(obj, dict):
            for item in obj:
                retval += APIHelper.form_encode(obj[item], instance_name + "[" + item + "]", array_serialization)
        else:
            retval.append((instance_name, obj))

        return retval

    @staticmethod
    def to_dictionary(obj):
        """Creates a dictionary representation of a class instance. The
        keys are taken from the API description and may differ from language
        specific variable names of properties.

        Args:
            obj: The object to be converted into a dictionary. Must expose
                a ``_names`` mapping of attribute name -> API name.

        Returns:
            dictionary: A dictionary form of the model with properties in
                their API formats.
        """
        dictionary = dict()

        # Loop through all properties in this model
        for name in obj._names:
            value = getattr(obj, name)
            if isinstance(value, list):
                # Loop through each item
                dictionary[obj._names[name]] = list()
                for item in value:
                    dictionary[obj._names[name]].append(APIHelper.to_dictionary(item) if hasattr(item, "_names") else item)
            elif isinstance(value, dict):
                # Loop through each item
                dictionary[obj._names[name]] = dict()
                for key in value:
                    dictionary[obj._names[name]][key] = APIHelper.to_dictionary(value[key]) if hasattr(value[key], "_names") else value[key]
            else:
                dictionary[obj._names[name]] = APIHelper.to_dictionary(value) if hasattr(value, "_names") else value

        # Return the result
        return dictionary

    @staticmethod
    def when_defined(func, value):
        # Apply *func* only when *value* is defined.
        # NOTE(review): truthiness test -- falsy-but-defined values
        # (0, '', False) also map to None. Confirm intended.
        return func(value) if value else None

    class CustomDate(object):
        """ A base class for wrapper classes of datetime.

        This class contains methods which help in
        appropriate serialization of datetime objects.
        """

        def __init__(self, dtime, value=None):
            # Keep both the datetime object and its serialized string form.
            self.datetime = dtime
            if not value:
                self.value = self.from_datetime(dtime)
            else:
                self.value = value

        def __repr__(self):
            return str(self.value)

        def __getstate__(self):
            # jsonpickle serializes only the formatted value.
            return self.value

        def __setstate__(self, state):
            # Deserialization is handled by the from_value classmethods.
            pass

    class HttpDateTime(CustomDate):
        """ A wrapper class for datetime to support HTTP date format."""

        @classmethod
        def from_datetime(cls, date_time):
            # RFC 1123 / HTTP-date format, always expressed in GMT.
            return eut.formatdate(timeval=mktime(date_time.timetuple()),
                                  localtime=False, usegmt=True)

        @classmethod
        def from_value(cls, value):
            dtime = datetime.datetime.fromtimestamp(eut.mktime_tz(eut.parsedate_tz(value)))
            return cls(dtime, value)

    class UnixDateTime(CustomDate):
        """ A wrapper class for datetime to support Unix date format."""

        @classmethod
        def from_datetime(cls, date_time):
            return calendar.timegm(date_time.utctimetuple())

        @classmethod
        def from_value(cls, value):
            dtime = datetime.datetime.utcfromtimestamp(float(value))
            return cls(dtime, float(value))

    class RFC3339DateTime(CustomDate):
        """ A wrapper class for datetime to support Rfc 3339 format."""

        @classmethod
        def from_datetime(cls, date_time):
            return date_time.isoformat()

        @classmethod
        def from_value(cls, value):
            dtime = dateutil.parser.parse(value)
            return cls(dtime, value)
from restapisdk.api_helper import APIHelper
class HttpRequest(object):
    """Container describing a single HTTP request: method, URL, headers,
    query parameters, body parameters and files.

    Attributes:
        http_method (HttpMethodEnum): HTTP verb to use when the request is
            executed.
        headers (dict): Header name/value pairs sent with the request.
        query_url (string): Destination URL of the request.
        parameters (dict): Form or body parameters sent in the request body.
    """

    def __init__(self,
                 http_method,
                 query_url,
                 headers=None,
                 query_parameters=None,
                 parameters=None,
                 files=None):
        """Initialise the request description.

        Args:
            http_method (HttpMethodEnum): The HTTP verb.
            query_url (string): The URL to send the request to.
            headers (dict, optional): Request headers.
            query_parameters (dict, optional): Parameters to encode into the
                URL's query string.
            parameters (dict, optional): Form or body parameters.
            files (dict, optional): Files to upload with the request.
        """
        self.http_method = http_method
        self.query_url = query_url
        self.headers = headers
        self.query_parameters = query_parameters
        self.parameters = parameters
        self.files = files

    def add_header(self, name, value):
        """Set header *name* to *value* on this request.

        Args:
            name (string): The name of the header.
            value (string): The value of the header.
        """
        self.headers.update({name: value})

    def add_parameter(self, name, value):
        """Set body/form parameter *name* to *value* on this request.

        Args:
            name (string): The name of the parameter.
            value (string): The value of the parameter.
        """
        self.parameters.update({name: value})

    def add_query_parameter(self, name, value):
        """Append one query parameter to the request URL, then normalise
        the resulting URL.

        Args:
            name (string): The name of the query parameter.
            value (string): The value of the query parameter.
        """
        extended = APIHelper.append_url_with_query_parameters(
            self.query_url,
            {name: value}
        )
        self.query_url = APIHelper.clean_url(extended)
from restapisdk.http.http_method_enum import HttpMethodEnum
from restapisdk.http.http_request import HttpRequest
class HttpClient(object):
    """An interface for the methods that an HTTP Client must implement.

    This class should not be instantiated but should be used as a base class
    for HTTP Client classes.
    """

    def execute_as_string(self, request):
        """Execute a given HttpRequest to get a string response back.

        Args:
            request (HttpRequest): The given HttpRequest to execute.

        Returns:
            HttpResponse: The response of the HttpRequest.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("Please Implement this method")

    def execute_as_binary(self, request):
        """Execute a given HttpRequest to get a binary response back.

        Args:
            request (HttpRequest): The given HttpRequest to execute.

        Returns:
            HttpResponse: The response of the HttpRequest.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("Please Implement this method")

    def convert_response(self, response, binary):
        """Converts the Response object of the HttpClient into an
        HttpResponse object.

        Args:
            response (dynamic): The original response object.
            binary (bool): Whether the response body is binary.

        Returns:
            HttpResponse: The converted HttpResponse object.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("Please Implement this method")

    @staticmethod
    def _or_empty(dictionary):
        """Return *dictionary*, or a fresh empty dict when it is None.

        Replaces the previous mutable ``{}`` default arguments: a shared
        default dict would be silently mutated across calls (e.g. via
        HttpRequest.add_header on a request built with the default).
        """
        return {} if dictionary is None else dictionary

    def get(self, query_url,
            headers=None,
            query_parameters=None):
        """Create a simple GET HttpRequest object for the given parameters.

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the
                URL.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
        """
        return HttpRequest(HttpMethodEnum.GET,
                           query_url,
                           self._or_empty(headers),
                           self._or_empty(query_parameters),
                           None,
                           None)

    def head(self, query_url,
             headers=None,
             query_parameters=None):
        """Create a simple HEAD HttpRequest object for the given parameters.

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the
                URL.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
        """
        return HttpRequest(HttpMethodEnum.HEAD,
                           query_url,
                           self._or_empty(headers),
                           self._or_empty(query_parameters),
                           None,
                           None)

    def post(self, query_url,
             headers=None,
             query_parameters=None,
             parameters=None,
             files=None):
        """Create a simple POST HttpRequest object for the given parameters.

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the
                URL.
            parameters (dict, optional): Form or body parameters to be included
                in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
        """
        return HttpRequest(HttpMethodEnum.POST,
                           query_url,
                           self._or_empty(headers),
                           self._or_empty(query_parameters),
                           self._or_empty(parameters),
                           self._or_empty(files))

    def put(self, query_url,
            headers=None,
            query_parameters=None,
            parameters=None,
            files=None):
        """Create a simple PUT HttpRequest object for the given parameters.

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the
                URL.
            parameters (dict, optional): Form or body parameters to be included
                in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
        """
        return HttpRequest(HttpMethodEnum.PUT,
                           query_url,
                           self._or_empty(headers),
                           self._or_empty(query_parameters),
                           self._or_empty(parameters),
                           self._or_empty(files))

    def patch(self, query_url,
              headers=None,
              query_parameters=None,
              parameters=None,
              files=None):
        """Create a simple PATCH HttpRequest object for the given parameters.

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the
                URL.
            parameters (dict, optional): Form or body parameters to be included
                in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
        """
        return HttpRequest(HttpMethodEnum.PATCH,
                           query_url,
                           self._or_empty(headers),
                           self._or_empty(query_parameters),
                           self._or_empty(parameters),
                           self._or_empty(files))

    def delete(self, query_url,
               headers=None,
               query_parameters=None,
               parameters=None,
               files=None):
        """Create a simple DELETE HttpRequest object for the given parameters.

        Args:
            query_url (string): The URL to send the request to.
            headers (dict, optional): The headers for the HTTP Request.
            query_parameters (dict, optional): Query parameters to add in the
                URL.
            parameters (dict, optional): Form or body parameters to be
                included in the body.
            files (dict, optional): Files to be sent with the request.

        Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
        """
        return HttpRequest(HttpMethodEnum.DELETE,
                           query_url,
                           self._or_empty(headers),
                           self._or_empty(query_parameters),
                           self._or_empty(parameters),
                           self._or_empty(files))
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2GroupAddgroupRequest(object):
    """Implementation of the 'Api Rest V2 Group Addgroup Request' model.

    Request payload for adding sub-groups to a group.

    Attributes:
        name (string): Name of the group
        id (string): The GUID of the group
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered
    """

    # Maps python attribute names to their wire (API) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "groups": 'groups'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 groups=None):
        """Constructor for the ApiRestV2GroupAddgroupRequest class"""
        self.name = name
        self.id = id
        self.groups = groups

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: A populated instance, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None

        raw_groups = dictionary.get('groups')
        groups = None
        if raw_groups is not None:
            groups = [GroupNameAndIDInput.from_dictionary(entry)
                      for entry in raw_groups]

        return cls(dictionary.get('name'),
                   dictionary.get('id'),
                   groups)

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.
        """
        schema_path = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_path).is_valid(val)
from restapisdk.models.meta_object_input import MetaObjectInput
class ApiRestV2MetadataTagUnassignRequest(object):
    """Implementation of the 'Api Rest V2 Metadata Tag Unassign Request' model.

    Request payload for removing a tag from metadata objects.

    Attributes:
        name (string): Name of the tag
        id (string): The GUID of the tag
        meta_object (list of MetaObjectInput): Metadata object details
    """

    # Maps python attribute names to their wire (API) property names.
    _names = {
        "meta_object": 'metaObject',
        "name": 'name',
        "id": 'id'
    }

    def __init__(self,
                 meta_object=None,
                 name=None,
                 id=None):
        """Constructor for the ApiRestV2MetadataTagUnassignRequest class"""
        self.name = name
        self.id = id
        self.meta_object = meta_object

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: A populated instance, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None

        raw_meta = dictionary.get('metaObject')
        meta_object = None
        if raw_meta is not None:
            meta_object = [MetaObjectInput.from_dictionary(entry)
                           for entry in raw_meta]

        return cls(meta_object,
                   dictionary.get('name'),
                   dictionary.get('id'))

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.
        """
        schema_path = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_path).is_valid(val)
class ApiRestV2GroupRemoveprivilegeRequest(object):
    """Implementation of the 'Api Rest V2 Group Removeprivilege Request' model.

    Request payload for removing privileges from a group.

    Attributes:
        name (string): Name of the group
        id (string): The GUID of the group to query.
        privileges (list of PrivilegeEnum): List of privileges
    """

    # Maps python attribute names to their wire (API) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "privileges": 'privileges'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 privileges=None):
        """Constructor for the ApiRestV2GroupRemoveprivilegeRequest class"""
        self.name = name
        self.id = id
        self.privileges = privileges

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: A populated instance, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None

        # Privileges are plain strings on the wire; no nested unboxing.
        return cls(dictionary.get('name'),
                   dictionary.get('id'),
                   dictionary.get('privileges'))

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.
        """
        schema_path = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_path).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2UserUpdateRequest(object):
    """Implementation of the 'Api Rest V2 User Update Request' model.

    Request payload for updating a ThoughtSpot user account.

    Attributes:
        name (string): Name of the user account. The username string must be
            unique.
        id (string): The GUID of the user account
        display_name (string): A display name string for the user, usually
            their first and last name.
        visibility (VisibilityEnum): Visibility of the user. Setting this to
            DEFAULT makes a user visible to other users and user groups, and
            thus allows them to share objects
        mail (string): Email id associated with the user account
        password (string): Password for the user account.
        state (StateEnum): Status of user account. active or inactive.
        notify_on_share (bool): User preference for receiving email
            notifications when another ThoughtSpot user shares answers or
            pinboards.
        show_walk_me (bool): The user preference for revisiting the
            onboarding experience.
        analyst_onboarding_complete (bool): Whether the user has finished
            the interactive onboarding walkthrough.
        mtype (Type2Enum): Type of user. LOCAL_USER indicates that the user
            is created locally in the ThoughtSpot system.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered
    """

    # Maps python attribute names to their wire (API) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "display_name": 'displayName',
        "visibility": 'visibility',
        "mail": 'mail',
        "password": 'password',
        "state": 'state',
        "notify_on_share": 'notifyOnShare',
        "show_walk_me": 'showWalkMe',
        "analyst_onboarding_complete": 'analystOnboardingComplete',
        "mtype": 'type',
        "groups": 'groups'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 display_name=None,
                 visibility='DEFAULT',
                 mail=None,
                 password=None,
                 state='ACTIVE',
                 notify_on_share=True,
                 show_walk_me=True,
                 analyst_onboarding_complete=True,
                 mtype='LOCAL_USER',
                 groups=None):
        """Constructor for the ApiRestV2UserUpdateRequest class"""
        self.name = name
        self.id = id
        self.display_name = display_name
        self.visibility = visibility
        self.mail = mail
        self.password = password
        self.state = state
        self.notify_on_share = notify_on_share
        self.show_walk_me = show_walk_me
        self.analyst_onboarding_complete = analyst_onboarding_complete
        self.mtype = mtype
        self.groups = groups

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when
                *dictionary* is None.
        """
        if dictionary is None:
            return None

        def _bool_or_default(key, default):
            # Bug fix: the previous truthiness test
            # (`d.get(k) if d.get(k) else default`) silently turned an
            # explicit False in the payload back into the default True.
            # Only a missing key or an explicit None falls back here.
            value = dictionary.get(key)
            return default if value is None else value

        # Extract variables from the dictionary
        name = dictionary.get('name')
        id = dictionary.get('id')
        display_name = dictionary.get('displayName')
        # String fields intentionally keep the historical behaviour: any
        # falsy value (missing, None or '') falls back to the default.
        visibility = dictionary.get("visibility") if dictionary.get("visibility") else 'DEFAULT'
        mail = dictionary.get('mail')
        password = dictionary.get('password')
        state = dictionary.get("state") if dictionary.get("state") else 'ACTIVE'
        notify_on_share = _bool_or_default("notifyOnShare", True)
        show_walk_me = _bool_or_default("showWalkMe", True)
        analyst_onboarding_complete = _bool_or_default("analystOnboardingComplete", True)
        mtype = dictionary.get("type") if dictionary.get("type") else 'LOCAL_USER'
        groups = None
        if dictionary.get('groups') is not None:
            groups = [GroupNameAndIDInput.from_dictionary(x) for x in dictionary.get('groups')]

        # Return an object of this model
        return cls(name,
                   id,
                   display_name,
                   visibility,
                   mail,
                   password,
                   state,
                   notify_on_share,
                   show_walk_me,
                   analyst_onboarding_complete,
                   mtype,
                   groups)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.
        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.group_name_and_id import GroupNameAndID
from restapisdk.models.user_name_and_id import UserNameAndID
class UserResponse(object):
"""Implementation of the 'UserResponse' model.
TODO: type model description here.
Attributes:
name (string): Username of the user account
display_name (string): Display name of the user account
id (string): GUID of the user account
visibility (string): Visibility of the user account
mail (string): Email of the user account
groups (list of GroupNameAndID): Name of the group to which user
account is added
privileges (list of string): Privileges assigned to user account
tags (list of string): Tags assigned to the user
state (string): Indicates if the user account is active or inactive
notify_on_share (bool): Indicates if the email should be sent when
object is shared with the user
show_walk_me (bool): Indicates if the walk me should be shown when
logging in
analyst_onboarding_complete (bool): Indicates if the onboarding is
completed for the user
first_login (int): Indicates if the use is logging in for the first
time
welcome_email_sent (bool): Indicates if the welcome email is sent to
email associated with the user account
is_deleted (bool): Indicates if the user account is deleted
is_hidden (bool): Indicates if the user account is hidden
is_external (bool): Indicates if the user account is from external
system isDeprecated
is_deprecated (bool): TODO: type description here.
complete (bool): Indicates if the all the properties of user account
is provided
is_super_user (bool): Indicates if the user account is super user
is_system_principal (bool): Indicates if the user account is system
principal
mtype (string): Indicates the type of user account
parenttype (string): Indicates the type of parent object
tenant_id (string): Tenant id associated with the user account
index_version (int): TODO: type description here.
generation_num (int): TODO: type description here.
created (float): Date and time when user account was created
modified (float): Date and time of last modification of user account
author (UserNameAndID): TODO: type description here.
modified_by (UserNameAndID): TODO: type description here.
owner (UserNameAndID): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"name": 'name',
"display_name": 'displayName',
"id": 'id',
"visibility": 'visibility',
"mail": 'mail',
"groups": 'groups',
"privileges": 'privileges',
"tags": 'tags',
"state": 'state',
"notify_on_share": 'notifyOnShare',
"show_walk_me": 'showWalkMe',
"analyst_onboarding_complete": 'analystOnboardingComplete',
"first_login": 'firstLogin',
"welcome_email_sent": 'welcomeEmailSent',
"is_deleted": 'isDeleted',
"is_hidden": 'isHidden',
"is_external": 'isExternal',
"is_deprecated": 'isDeprecated',
"complete": 'complete',
"is_super_user": 'isSuperUser',
"is_system_principal": 'isSystemPrincipal',
"mtype": 'type',
"parenttype": 'parenttype',
"tenant_id": 'tenantId',
"index_version": 'indexVersion',
"generation_num": 'generationNum',
"created": 'created',
"modified": 'modified',
"author": 'author',
"modified_by": 'modifiedBy',
"owner": 'owner'
}
def __init__(self,
name=None,
display_name=None,
id=None,
visibility=None,
mail=None,
groups=None,
privileges=None,
tags=None,
state=None,
notify_on_share=None,
show_walk_me=None,
analyst_onboarding_complete=None,
first_login=None,
welcome_email_sent=None,
is_deleted=None,
is_hidden=None,
is_external=None,
is_deprecated=None,
complete=None,
is_super_user=None,
is_system_principal=None,
mtype=None,
parenttype=None,
tenant_id=None,
index_version=None,
generation_num=None,
created=None,
modified=None,
author=None,
modified_by=None,
owner=None):
"""Constructor for the UserResponse class"""
# Initialize members of the class
self.name = name
self.display_name = display_name
self.id = id
self.visibility = visibility
self.mail = mail
self.groups = groups
self.privileges = privileges
self.tags = tags
self.state = state
self.notify_on_share = notify_on_share
self.show_walk_me = show_walk_me
self.analyst_onboarding_complete = analyst_onboarding_complete
self.first_login = first_login
self.welcome_email_sent = welcome_email_sent
self.is_deleted = is_deleted
self.is_hidden = is_hidden
self.is_external = is_external
self.is_deprecated = is_deprecated
self.complete = complete
self.is_super_user = is_super_user
self.is_system_principal = is_system_principal
self.mtype = mtype
self.parenttype = parenttype
self.tenant_id = tenant_id
self.index_version = index_version
self.generation_num = generation_num
self.created = created
self.modified = modified
self.author = author
self.modified_by = modified_by
self.owner = owner
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
display_name = dictionary.get('displayName')
id = dictionary.get('id')
visibility = dictionary.get('visibility')
mail = dictionary.get('mail')
groups = None
if dictionary.get('groups') is not None:
groups = [GroupNameAndID.from_dictionary(x) for x in dictionary.get('groups')]
privileges = dictionary.get('privileges')
tags = dictionary.get('tags')
state = dictionary.get('state')
notify_on_share = dictionary.get('notifyOnShare')
show_walk_me = dictionary.get('showWalkMe')
analyst_onboarding_complete = dictionary.get('analystOnboardingComplete')
first_login = dictionary.get('firstLogin')
welcome_email_sent = dictionary.get('welcomeEmailSent')
is_deleted = dictionary.get('isDeleted')
is_hidden = dictionary.get('isHidden')
is_external = dictionary.get('isExternal')
is_deprecated = dictionary.get('isDeprecated')
complete = dictionary.get('complete')
is_super_user = dictionary.get('isSuperUser')
is_system_principal = dictionary.get('isSystemPrincipal')
mtype = dictionary.get('type')
parenttype = dictionary.get('parenttype')
tenant_id = dictionary.get('tenantId')
index_version = dictionary.get('indexVersion')
generation_num = dictionary.get('generationNum')
created = dictionary.get('created')
modified = dictionary.get('modified')
author = UserNameAndID.from_dictionary(dictionary.get('author')) if dictionary.get('author') else None
modified_by = UserNameAndID.from_dictionary(dictionary.get('modifiedBy')) if dictionary.get('modifiedBy') else None
owner = UserNameAndID.from_dictionary(dictionary.get('owner')) if dictionary.get('owner') else None
# Return an object of this model
return cls(name,
display_name,
id,
visibility,
mail,
groups,
privileges,
tags,
state,
notify_on_share,
show_walk_me,
analyst_onboarding_complete,
first_login,
welcome_email_sent,
is_deleted,
is_hidden,
is_external,
is_deprecated,
complete,
is_super_user,
is_system_principal,
mtype,
parenttype,
tenant_id,
index_version,
generation_num,
created,
modified,
author,
modified_by,
owner)
@classmethod
def validate(cls, val):
"""Validates value against class schema
Args:
val: the value to be validated
Returns:
boolean : if value is valid against schema.
"""
return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val) | /restapisdktest-1.6.8.tar.gz/restapisdktest-1.6.8/restapisdk/models/user_response.py | 0.456652 | 0.283592 | user_response.py | pypi |
class ApiRestV2MetadataTmlExportRequest(object):
    """Implementation of the 'Api Rest V2 Metadata Tml Export Request' model.

    Request payload for exporting TML objects.

    Attributes:
        id (list of string): A JSON array of GUIDs of the objects.
        format_type (FormatTypeEnum): The format in which to export the
            objects.
        export_associated (bool): Specifies if you would like to export the
            associated objects. To export the objects associated with the
            objects specified in id, set the value to true. When set to true,
            the API exports any underlying worksheets, tables, or views for a
            given object. By default, the API does not export these
            underlying objects.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "id": 'id',
        "format_type": 'formatType',
        "export_associated": 'exportAssociated'
    }

    def __init__(self,
                 id=None,
                 format_type='YAML',
                 export_associated=False):
        """Constructor for the ApiRestV2MetadataTmlExportRequest class"""
        # Store the constructor arguments verbatim.
        self.export_associated = export_associated
        self.format_type = format_type
        self.id = id

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        get = dictionary.get
        # Falsy payload values fall back to the documented defaults,
        # matching the generator's original behavior.
        return cls(get('id'),
                   get("formatType") or 'YAML',
                   get("exportAssociated") or False)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2UserSearchRequest(object):
    """Implementation of the 'Api Rest V2 User Search Request' model.

    Search criteria for querying user accounts.

    Attributes:
        name (string): Name of the user.
        id (string): The GUID of the user account to query.
        display_name (string): A unique display name string for the user,
            usually their first and last name.
        visibility (VisibilityEnum): Visibility of the user. DEFAULT makes a
            user visible to other users and user groups, and thus allows
            them to share objects.
        mail (string): email of the user account.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
        privileges (list of PrivilegeEnum): A JSON array of privileges
            assigned to the user.
        state (StateEnum): Status of user account. acitve or inactive.
        notify_on_share (NotifyOnShareEnum): User preference for receiving
            email notifications when another ThoughtSpot user shares answers
            or pinboards.
        show_walk_me (ShowWalkMeEnum): The user preference for revisiting
            the onboarding experience.
        analyst_onboarding_complete (AnalystOnboardingCompleteEnum): Whether
            the interactive onboarding walkthrough has been completed.
        mtype (Type2Enum): Type of user. LOCAL_USER indicates that the user
            is created locally in the ThoughtSpot system.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "display_name": 'displayName',
        "visibility": 'visibility',
        "mail": 'mail',
        "groups": 'groups',
        "privileges": 'privileges',
        "state": 'state',
        "notify_on_share": 'notifyOnShare',
        "show_walk_me": 'showWalkMe',
        "analyst_onboarding_complete": 'analystOnboardingComplete',
        "mtype": 'type'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 display_name=None,
                 visibility='DEFAULT',
                 mail=None,
                 groups=None,
                 privileges=None,
                 state=None,
                 notify_on_share=None,
                 show_walk_me=None,
                 analyst_onboarding_complete=None,
                 mtype=None):
        """Constructor for the ApiRestV2UserSearchRequest class"""
        # ``locals()`` holds exactly ``self`` plus the parameters here;
        # copy each parameter onto the instance under the same name.
        for attr_name, attr_value in locals().items():
            if attr_name != 'self':
                setattr(self, attr_name, attr_value)

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        get = dictionary.get
        parsed_groups = None
        if get('groups') is not None:
            parsed_groups = [GroupNameAndIDInput.from_dictionary(entry) for entry in get('groups')]
        return cls(name=get('name'),
                   id=get('id'),
                   display_name=get('displayName'),
                   visibility=get("visibility") or 'DEFAULT',
                   mail=get('mail'),
                   groups=parsed_groups,
                   privileges=get('privileges'),
                   state=get('state'),
                   notify_on_share=get('notifyOnShare'),
                   show_walk_me=get('showWalkMe'),
                   analyst_onboarding_complete=get('analystOnboardingComplete'),
                   mtype=get('type'))

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.user_name_and_id_input import UserNameAndIDInput
class ApiRestV2GroupRemoveuserRequest(object):
    """Implementation of the 'Api Rest V2 Group Removeuser Request' model.

    Request payload for removing users from a group.

    Attributes:
        name (string): Name of the group.
        id (string): The GUID of the group to query.
        users (list of UserNameAndIDInput): A JSON array of name of users or
            GUIDs of users or both. When both are given then id is
            considered.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "users": 'users'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 users=None):
        """Constructor for the ApiRestV2GroupRemoveuserRequest class"""
        # Store the constructor arguments verbatim.
        self.users = users
        self.id = id
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        raw_users = dictionary.get('users')
        parsed_users = None
        if raw_users is not None:
            parsed_users = [UserNameAndIDInput.from_dictionary(entry) for entry in raw_users]
        return cls(name=dictionary.get('name'),
                   id=dictionary.get('id'),
                   users=parsed_users)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
class ApiRestV2SessionGettokenRequest(object):
    """Implementation of the 'Api Rest V2 Session Gettoken Request' model.

    Credentials payload for requesting a session token.

    Attributes:
        user_name (string): Username of the user account.
        password (string): The password of the user account.
        token_expiry_duration (string): Provide duration in seconds after
            which the token should expire.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "user_name": 'userName',
        "password": 'password',
        "token_expiry_duration": 'tokenExpiryDuration'
    }

    def __init__(self,
                 user_name=None,
                 password=None,
                 token_expiry_duration=None):
        """Constructor for the ApiRestV2SessionGettokenRequest class"""
        # Store the constructor arguments verbatim.
        self.token_expiry_duration = token_expiry_duration
        self.password = password
        self.user_name = user_name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        get = dictionary.get
        return cls(user_name=get('userName'),
                   password=get('password'),
                   token_expiry_duration=get('tokenExpiryDuration'))

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2UserRemovegroupRequest(object):
    """Implementation of the 'Api Rest V2 User Removegroup Request' model.

    Request payload for removing a user from groups.

    Attributes:
        name (string): User name of the user account.
        id (string): The GUID of the user account.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "groups": 'groups'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 groups=None):
        """Constructor for the ApiRestV2UserRemovegroupRequest class"""
        # Store the constructor arguments verbatim.
        self.groups = groups
        self.id = id
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        raw_groups = dictionary.get('groups')
        parsed_groups = None
        if raw_groups is not None:
            parsed_groups = [GroupNameAndIDInput.from_dictionary(entry) for entry in raw_groups]
        return cls(name=dictionary.get('name'),
                   id=dictionary.get('id'),
                   groups=parsed_groups)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.user_name_and_id_input import UserNameAndIDInput
class ApiRestV2GroupAdduserRequest(object):
    """Implementation of the 'Api Rest V2 Group Adduser Request' model.

    Request payload for adding users to a group.

    Attributes:
        name (string): Name of the group.
        id (string): The GUID of the group.
        users (list of UserNameAndIDInput): A JSON array of name of users or
            GUIDs of users or both. When both are given then id is
            considered.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "users": 'users'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 users=None):
        """Constructor for the ApiRestV2GroupAdduserRequest class"""
        # Store the constructor arguments verbatim.
        self.users = users
        self.id = id
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        raw_users = dictionary.get('users')
        parsed_users = None
        if raw_users is not None:
            parsed_users = [UserNameAndIDInput.from_dictionary(entry) for entry in raw_users]
        return cls(name=dictionary.get('name'),
                   id=dictionary.get('id'),
                   users=parsed_users)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
class HomeLiveboardResponse(object):
    """Implementation of the 'HomeLiveboardResponse' model.

    Response describing a user's home liveboard assignment.

    Attributes:
        user_name (string): Name of the user.
        user_id (string): The GUID of the user.
        liveboard_name (string): Name of the liveboard.
        liveboard_id (string): The GUID of the liveboard.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "user_name": 'userName',
        "user_id": 'userId',
        "liveboard_name": 'liveboardName',
        "liveboard_id": 'liveboardId'
    }

    def __init__(self,
                 user_name=None,
                 user_id=None,
                 liveboard_name=None,
                 liveboard_id=None):
        """Constructor for the HomeLiveboardResponse class"""
        # Store the constructor arguments verbatim.
        self.liveboard_id = liveboard_id
        self.liveboard_name = liveboard_name
        self.user_id = user_id
        self.user_name = user_name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        get = dictionary.get
        return cls(user_name=get('userName'),
                   user_id=get('userId'),
                   liveboard_name=get('liveboardName'),
                   liveboard_id=get('liveboardId'))

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2GroupRemovegroupRequest(object):
    """Implementation of the 'Api Rest V2 Group Removegroup Request' model.

    Request payload for removing sub-groups from a group.

    Attributes:
        name (string): Name of the group.
        id (string): The GUID of the group.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "groups": 'groups'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 groups=None):
        """Constructor for the ApiRestV2GroupRemovegroupRequest class"""
        # Store the constructor arguments verbatim.
        self.groups = groups
        self.id = id
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        raw_groups = dictionary.get('groups')
        parsed_groups = None
        if raw_groups is not None:
            parsed_groups = [GroupNameAndIDInput.from_dictionary(entry) for entry in raw_groups]
        return cls(name=dictionary.get('name'),
                   id=dictionary.get('id'),
                   groups=parsed_groups)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
class AccessLevelInput(object):
    """Implementation of the 'AccessLevelInput' model.

    Describes the minimum access level a principal (user or user group)
    holds on an object.

    Attributes:
        name (string): Username or name of the user group.
        id (string): GUID of the user or user group.
        mtype (Type1Enum): Type of access detail provided.
        access (AccessEnum): Minimum access level that the specified user or
            user group has.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "mtype": 'type',
        "access": 'access'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 mtype=None,
                 access=None):
        """Constructor for the AccessLevelInput class"""
        # Store the constructor arguments verbatim.
        self.access = access
        self.mtype = mtype
        self.id = id
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        get = dictionary.get
        # Note the wire name for ``mtype`` is 'type'.
        return cls(name=get('name'),
                   id=get('id'),
                   mtype=get('type'),
                   access=get('access'))

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2UserAddgroupRequest(object):
    """Implementation of the 'Api Rest V2 User Addgroup Request' model.

    Request payload for adding a user to groups.

    Attributes:
        name (string): Username of the user account.
        id (string): The GUID of the user account.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "name": 'name',
        "id": 'id',
        "groups": 'groups'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 groups=None):
        """Constructor for the ApiRestV2UserAddgroupRequest class"""
        # Store the constructor arguments verbatim.
        self.groups = groups
        self.id = id
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        raw_groups = dictionary.get('groups')
        parsed_groups = None
        if raw_groups is not None:
            parsed_groups = [GroupNameAndIDInput.from_dictionary(entry) for entry in raw_groups]
        return cls(name=dictionary.get('name'),
                   id=dictionary.get('id'),
                   groups=parsed_groups)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
class ApiRestV2DatabaseTableCreateRequest(object):
    """Implementation of the 'Api Rest V2 Database Table Create Request' model.

    Request payload for creating a Falcon table from a DDL statement.

    Attributes:
        create_database (bool): Flag to indicate if the database and schema
            should be created if they do not exist in Falcon. (Valid values:
            True/False)
        schema (string): DDL of the table to be created.
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "create_database": 'createDatabase',
        "schema": 'schema'
    }

    def __init__(self,
                 create_database=True,
                 schema=None):
        """Constructor for the ApiRestV2DatabaseTableCreateRequest class

        Args:
            create_database (bool): whether to auto-create the database and
                schema when they do not already exist (defaults to True).
            schema (string): the DDL of the table to be created.
        """
        # Initialize members of the class
        self.create_database = create_database
        self.schema = schema

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
            as obtained from the deserialization of the server's response. The
            keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        # Extract variables from the dictionary.
        # BUG FIX: the previous expression
        #   dictionary.get("createDatabase") if dictionary.get("createDatabase") else True
        # mapped an explicit ``"createDatabase": false`` back to True,
        # because False is falsy. Only fall back to the default when the
        # key is absent or explicitly null.
        create_database = dictionary.get('createDatabase')
        if create_database is None:
            create_database = True
        schema = dictionary.get('schema')
        # Return an object of this model
        return cls(create_database,
                   schema)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.
        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class ApiRestV2SessionLoginRequest(object):
    """Implementation of the 'Api Rest V2 Session Login Request' model.

    Credentials payload for establishing a session.

    Attributes:
        user_name (string): Username of the user account.
        password (string): The password of the user account.
        remember_me (bool): A flag to remember the user session. When set to
            true, sets a session cookie that persists in subsequent API
            calls.
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "user_name": 'userName',
        "password": 'password',
        "remember_me": 'rememberMe'
    }

    def __init__(self,
                 user_name=None,
                 password=None,
                 remember_me=False):
        """Constructor for the ApiRestV2SessionLoginRequest class"""
        # Store the constructor arguments verbatim.
        self.remember_me = remember_me
        self.password = password
        self.user_name = user_name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        get = dictionary.get
        # A missing (or falsy) rememberMe falls back to False, matching the
        # generator's original behavior.
        return cls(user_name=get('userName'),
                   password=get('password'),
                   remember_me=get("rememberMe") or False)

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
class ApiRestV2DatabaseTableRunqueryRequest(object):
    """Implementation of the 'Api Rest V2 Database Table Runquery Request' model.

    Request payload carrying the TQL statements to execute.

    Attributes:
        statement (list of string): A JSON array of TQL statements. Each TQL
            statement should end with semi-colon (;). The TQL operations
            that can be run through this API are restricted to create
            database and schema, alter table, delete and update table rows.
            If a TQL statement fails, then the subsequent statements in the
            array are not run. Example statement: alter table
            test_db.test_schema.test_table drop contraint primary key;
    """

    # Mapping from model attribute names to API (wire) property names.
    _names = {
        "statement": 'statement'
    }

    def __init__(self,
                 statement=None):
        """Constructor for the ApiRestV2DatabaseTableRunqueryRequest class"""
        # Store the statements list verbatim.
        self.statement = statement

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        return cls(statement=dictionary.get('statement'))

    @classmethod
    def validate(cls, val):
        """Validates value against the class JSON schema.

        Args:
            val: the value to be validated

        Returns:
            boolean : True if the value is valid against the schema.
        """
        schema_file = APIHelper.get_schema_path(os.path.abspath(__file__))
        return SchemaValidatorWrapper.getValidator(schema_file).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
class ApiRestV2UserCreateRequest(object):

    """Implementation of the 'Api Rest V2 User Create Request' model.

    Request payload for creating a user account in ThoughtSpot.

    Attributes:
        name (string): Name of the user account. The username string must be
            unique.
        display_name (string): A display name string for the user, usually
            their first and last name.
        visibility (VisibilityEnum): Visibility of the user. Defaults to
            DEFAULT, which makes the user visible to other users and user
            groups and thus allows them to share objects.
        mail (string): Email id associated with the user account.
        password (string): Password for the user account.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
        state (StateEnum): Status of user account: active or inactive.
        notify_on_share (bool): User preference for receiving email
            notifications when another ThoughtSpot user shares answers or
            pinboards.
        show_walk_me (bool): User preference for revisiting the onboarding
            experience.
        analyst_onboarding_complete (bool): Whether the user has completed
            (or turned off) the interactive guided onboarding walkthrough.
        mtype (Type2Enum): Type of user. LOCAL_USER indicates that the user
            is created locally in the ThoughtSpot system.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "display_name": 'displayName',
        "password": 'password',
        "visibility": 'visibility',
        "mail": 'mail',
        "groups": 'groups',
        "state": 'state',
        "notify_on_share": 'notifyOnShare',
        "show_walk_me": 'showWalkMe',
        "analyst_onboarding_complete": 'analystOnboardingComplete',
        "mtype": 'type'
    }

    def __init__(self,
                 name=None,
                 display_name=None,
                 password=None,
                 visibility='DEFAULT',
                 mail=None,
                 groups=None,
                 state='ACTIVE',
                 notify_on_share=True,
                 show_walk_me=True,
                 analyst_onboarding_complete=True,
                 mtype='LOCAL_USER'):
        """Constructor for the ApiRestV2UserCreateRequest class"""
        # Initialize members of the class
        self.name = name
        self.display_name = display_name
        self.visibility = visibility
        self.mail = mail
        self.password = password
        self.groups = groups
        self.state = state
        self.notify_on_share = notify_on_share
        self.show_walk_me = show_walk_me
        self.analyst_onboarding_complete = analyst_onboarding_complete
        self.mtype = mtype

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        name = dictionary.get('name')
        display_name = dictionary.get('displayName')
        password = dictionary.get('password')
        visibility = dictionary.get("visibility") if dictionary.get("visibility") else 'DEFAULT'
        mail = dictionary.get('mail')
        groups = None
        if dictionary.get('groups') is not None:
            groups = [GroupNameAndIDInput.from_dictionary(x) for x in dictionary.get('groups')]
        state = dictionary.get("state") if dictionary.get("state") else 'ACTIVE'
        # Use explicit None checks for the boolean flags: a plain truthiness
        # test would silently replace an explicit False from the API payload
        # with the default True.
        notify_on_share = dictionary.get("notifyOnShare") if dictionary.get("notifyOnShare") is not None else True
        show_walk_me = dictionary.get("showWalkMe") if dictionary.get("showWalkMe") is not None else True
        analyst_onboarding_complete = dictionary.get("analystOnboardingComplete") if dictionary.get("analystOnboardingComplete") is not None else True
        mtype = dictionary.get("type") if dictionary.get("type") else 'LOCAL_USER'

        # Return an object of this model
        return cls(name,
                   display_name,
                   password,
                   visibility,
                   mail,
                   groups,
                   state,
                   notify_on_share,
                   show_walk_me,
                   analyst_onboarding_complete,
                   mtype)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.meta_object_input import MetaObjectInput
class ApiRestV2MetadataTagAssignRequest(object):

    """Implementation of the 'Api Rest V2 Metadata Tag Assign Request' model.

    Request payload for assigning a tag to metadata objects.

    Attributes:
        name (string): Name of the tag.
        id (string): The GUID of the tag.
        meta_object (list of MetaObjectInput): Metadata object details.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "meta_object": 'metaObject',
        "name": 'name',
        "id": 'id'
    }

    def __init__(self,
                 meta_object=None,
                 name=None,
                 id=None):
        """Constructor for the ApiRestV2MetadataTagAssignRequest class"""
        # Initialize members of the class
        self.name = name
        self.id = id
        self.meta_object = meta_object

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        meta_object = None
        if dictionary.get('metaObject') is not None:
            meta_object = [MetaObjectInput.from_dictionary(x) for x in dictionary.get('metaObject')]
        name = dictionary.get('name')
        id = dictionary.get('id')

        # Return an object of this model
        return cls(meta_object,
                   name,
                   id)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.client_state import ClientState
from restapisdk.models.tag_name_and_id import TagNameAndID
class MetadataTagResponse(object):

    """Implementation of the 'MetadataTagResponse' model.

    Response model describing a metadata tag and its audit fields.

    Attributes:
        name (string): Name of the tag.
        id (string): GUID of the tag.
        client_state (ClientState): Client-side state attached to the tag.
        index_version (int): Index version of the tag object.
        generation_num (int): Generation number of the tag object.
        is_deleted (bool): Indicates if the tag is deleted.
        is_hidden (bool): Indicates if the tag is hidden.
        is_external (bool): Indicates if the tag is from an external system.
        is_deprecated (bool): Indicates if the tag is deprecated.
        created (float): Date and time when the tag was created.
        modified (float): Date and time of last modification of the tag.
        modified_by (TagNameAndID): User who last modified the tag.
        author (TagNameAndID): User who authored the tag.
        owner (TagNameAndID): User who owns the tag.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "id": 'id',
        "client_state": 'clientState',
        "index_version": 'indexVersion',
        "generation_num": 'generationNum',
        "is_deleted": 'isDeleted',
        "is_hidden": 'isHidden',
        "is_external": 'isExternal',
        "is_deprecated": 'isDeprecated',
        "created": 'created',
        "modified": 'modified',
        "modified_by": 'modifiedBy',
        "author": 'author',
        "owner": 'owner'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 client_state=None,
                 index_version=None,
                 generation_num=None,
                 is_deleted=None,
                 is_hidden=None,
                 is_external=None,
                 is_deprecated=None,
                 created=None,
                 modified=None,
                 modified_by=None,
                 author=None,
                 owner=None):
        """Constructor for the MetadataTagResponse class"""
        # Initialize members of the class
        self.name = name
        self.id = id
        self.client_state = client_state
        self.index_version = index_version
        self.generation_num = generation_num
        self.is_deleted = is_deleted
        self.is_hidden = is_hidden
        self.is_external = is_external
        self.is_deprecated = is_deprecated
        self.created = created
        self.modified = modified
        self.modified_by = modified_by
        self.author = author
        self.owner = owner

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        name = dictionary.get('name')
        id = dictionary.get('id')
        client_state = ClientState.from_dictionary(dictionary.get('clientState')) if dictionary.get('clientState') else None
        index_version = dictionary.get('indexVersion')
        generation_num = dictionary.get('generationNum')
        is_deleted = dictionary.get('isDeleted')
        is_hidden = dictionary.get('isHidden')
        is_external = dictionary.get('isExternal')
        is_deprecated = dictionary.get('isDeprecated')
        created = dictionary.get('created')
        modified = dictionary.get('modified')
        modified_by = TagNameAndID.from_dictionary(dictionary.get('modifiedBy')) if dictionary.get('modifiedBy') else None
        author = TagNameAndID.from_dictionary(dictionary.get('author')) if dictionary.get('author') else None
        owner = TagNameAndID.from_dictionary(dictionary.get('owner')) if dictionary.get('owner') else None

        # Return an object of this model
        return cls(name,
                   id,
                   client_state,
                   index_version,
                   generation_num,
                   is_deleted,
                   is_hidden,
                   is_external,
                   is_deprecated,
                   created,
                   modified,
                   modified_by,
                   author,
                   owner)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
from restapisdk.models.user_name_and_id_input import UserNameAndIDInput
class ApiRestV2GroupCreateRequest(object):

    """Implementation of the 'Api Rest V2 Group Create Request' model.

    Request payload for creating a user group in ThoughtSpot.

    Attributes:
        name (string): Name of the user group. The group name string must be
            unique.
        display_name (string): A unique display name string for the user
            group, for example, Developer group.
        visibility (Visibility3Enum): Visibility of the group. Defaults to
            DEFAULT, which makes the group visible to other users and user
            groups and thus allows them to share objects.
        description (string): Description text for the group.
        privileges (list of PrivilegeEnum): A JSON array of privileges
            assigned to the group.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
        users (list of UserNameAndIDInput): A JSON array of names of users or
            GUIDs of users or both. When both are given then id is considered.
        mtype (Type5Enum): Type of user group. LOCAL_GROUP indicates that the
            group is created locally in the ThoughtSpot system.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "display_name": 'displayName',
        "visibility": 'visibility',
        "description": 'description',
        "privileges": 'privileges',
        "groups": 'groups',
        "users": 'users',
        "mtype": 'type'
    }

    def __init__(self,
                 name=None,
                 display_name=None,
                 visibility='DEFAULT',
                 description=None,
                 privileges=None,
                 groups=None,
                 users=None,
                 mtype='LOCAL_GROUP'):
        """Constructor for the ApiRestV2GroupCreateRequest class"""
        # Initialize members of the class
        self.name = name
        self.display_name = display_name
        self.visibility = visibility
        self.description = description
        self.privileges = privileges
        self.groups = groups
        self.users = users
        self.mtype = mtype

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        name = dictionary.get('name')
        display_name = dictionary.get('displayName')
        visibility = dictionary.get("visibility") if dictionary.get("visibility") else 'DEFAULT'
        description = dictionary.get('description')
        privileges = dictionary.get('privileges')
        groups = None
        if dictionary.get('groups') is not None:
            groups = [GroupNameAndIDInput.from_dictionary(x) for x in dictionary.get('groups')]
        users = None
        if dictionary.get('users') is not None:
            users = [UserNameAndIDInput.from_dictionary(x) for x in dictionary.get('users')]
        mtype = dictionary.get("type") if dictionary.get("type") else 'LOCAL_GROUP'

        # Return an object of this model
        return cls(name,
                   display_name,
                   visibility,
                   description,
                   privileges,
                   groups,
                   users,
                   mtype)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class ApiRestV2MetadataTmlImportRequest(object):

    """Implementation of the 'Api Rest V2 Metadata Tml Import Request' model.

    Request payload for importing TML (ThoughtSpot Modeling Language)
    objects.

    Attributes:
        object_tml (list of string): A JSON array of TML objects to upload,
            in YAML or JSON format. If in YAML format within the JSON array,
            use escape characters for YAML quotes, and new line characters
            when there is a new line.
        import_policy (ImportPolicyEnum): Policy to follow during import.
        force_create (bool): Specifies if you are updating or creating
            objects. To create new objects, specify true. By default,
            ThoughtSpot updates existing objects that have the same GUID as
            the objects you are importing. When set to true, the object is
            assigned a new GUID even if the imported TML included a guid
            value, so there is no need to include the guid in the TML file
            when using forceCreate=true.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "object_tml": 'objectTML',
        "import_policy": 'importPolicy',
        "force_create": 'forceCreate'
    }

    def __init__(self,
                 object_tml=None,
                 import_policy='PARTIAL',
                 force_create=False):
        """Constructor for the ApiRestV2MetadataTmlImportRequest class"""
        # Initialize members of the class
        self.object_tml = object_tml
        self.import_policy = import_policy
        self.force_create = force_create

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        object_tml = dictionary.get('objectTML')
        import_policy = dictionary.get("importPolicy") if dictionary.get("importPolicy") else 'PARTIAL'
        # Explicit None check: an explicit False in the payload must be kept
        # as False rather than re-deriving the default through truthiness.
        force_create = dictionary.get("forceCreate") if dictionary.get("forceCreate") is not None else False

        # Return an object of this model
        return cls(object_tml,
                   import_policy,
                   force_create)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.access_level_input import AccessLevelInput
from restapisdk.models.name_and_id_input import NameAndIdInput
from restapisdk.models.tag_name_and_id_input import TagNameAndIdInput
class ApiRestV2MetadataHeaderSearchRequest(object):

    """Implementation of the 'Api Rest V2 Metadata Header Search Request' model.

    Request payload for searching metadata object headers with paging,
    sorting, and ownership/tag filters.

    Attributes:
        output_fields (list of string): Array of header field names that need
            to be included in the header response.
        offset (string): The offset point, starting from where the records
            should be included in the response. If no input is provided then
            offset starts from 0.
        batch_size (string): The number of records that should be included in
            the response starting from offset position. If no input is
            provided, then all records starting from the value provided in
            offset are included in the response.
        sort_by (SortByEnum): Field based on which the response needs to be
            ordered.
        sort_order (SortOrderEnum): Order in which sortBy should be applied.
        mtype (Type9Enum): Type of the metadata object being searched.
        name_pattern (string): A pattern to match the name of the metadata
            object. Matching is case-insensitive; use % as a wildcard.
        fetch_id (list of string): A JSON array containing the GUIDs of the
            metadata objects that you want to fetch.
        skip_id (list of string): A JSON array containing the GUIDs of the
            metadata objects that you want to skip.
        show_hidden (bool): When set to true, returns details of the hidden
            objects, such as a column in a worksheet or a table.
        auto_created (AutoCreatedEnum): A flag to indicate whether to list
            only the auto created objects. When no value is provided then all
            objects are returned.
        access_level (list of AccessLevelInput): A JSON array of objects with
            user details for which the metadata objects should be considered.
            With type USER the API returns objects associated with the user;
            with type USER_GROUP it returns objects for all users mapped to
            the group. If both name and id are provided, id is considered.
        tag (list of TagNameAndIdInput): A JSON array of name or GUID of tags
            or both. When both are given then id is considered.
        favorite_for (list of NameAndIdInput): A JSON array of name or GUID
            of the user or both for whom the object is assigned as favorite.
            When both are given then id is considered.
        created_by (list of NameAndIdInput): A JSON array of name or GUID of
            the user or both who created the object. When both are given then
            id is considered.
        last_modified_by (list of NameAndIdInput): A JSON array of name or
            GUID of the user or both who last modified the object. When both
            are given then id is considered.
        owned_by (list of NameAndIdInput): A JSON array of name or GUID of
            the user or both who owns the object. When both are given then id
            is considered.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "mtype": 'type',
        "output_fields": 'outputFields',
        "offset": 'offset',
        "batch_size": 'batchSize',
        "sort_by": 'sortBy',
        "sort_order": 'sortOrder',
        "name_pattern": 'namePattern',
        "fetch_id": 'fetchId',
        "skip_id": 'skipId',
        "show_hidden": 'showHidden',
        "auto_created": 'autoCreated',
        "access_level": 'accessLevel',
        "tag": 'tag',
        "favorite_for": 'favoriteFor',
        "created_by": 'createdBy',
        "last_modified_by": 'lastModifiedBy',
        "owned_by": 'ownedBy'
    }

    def __init__(self,
                 mtype=None,
                 output_fields=None,
                 offset='0',
                 batch_size=None,
                 sort_by='DEFAULT',
                 sort_order='DEFAULT',
                 name_pattern=None,
                 fetch_id=None,
                 skip_id=None,
                 show_hidden=False,
                 auto_created=None,
                 access_level=None,
                 tag=None,
                 favorite_for=None,
                 created_by=None,
                 last_modified_by=None,
                 owned_by=None):
        """Constructor for the ApiRestV2MetadataHeaderSearchRequest class"""
        # Initialize members of the class
        self.output_fields = output_fields
        self.offset = offset
        self.batch_size = batch_size
        self.sort_by = sort_by
        self.sort_order = sort_order
        self.mtype = mtype
        self.name_pattern = name_pattern
        self.fetch_id = fetch_id
        self.skip_id = skip_id
        self.show_hidden = show_hidden
        self.auto_created = auto_created
        self.access_level = access_level
        self.tag = tag
        self.favorite_for = favorite_for
        self.created_by = created_by
        self.last_modified_by = last_modified_by
        self.owned_by = owned_by

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        mtype = dictionary.get('type')
        output_fields = dictionary.get('outputFields')
        offset = dictionary.get("offset") if dictionary.get("offset") else '0'
        batch_size = dictionary.get('batchSize')
        sort_by = dictionary.get("sortBy") if dictionary.get("sortBy") else 'DEFAULT'
        sort_order = dictionary.get("sortOrder") if dictionary.get("sortOrder") else 'DEFAULT'
        name_pattern = dictionary.get('namePattern')
        fetch_id = dictionary.get('fetchId')
        skip_id = dictionary.get('skipId')
        # Explicit None check so an explicit False in the payload is kept
        # as-is instead of being re-derived through truthiness.
        show_hidden = dictionary.get("showHidden") if dictionary.get("showHidden") is not None else False
        auto_created = dictionary.get('autoCreated')
        access_level = None
        if dictionary.get('accessLevel') is not None:
            access_level = [AccessLevelInput.from_dictionary(x) for x in dictionary.get('accessLevel')]
        tag = None
        if dictionary.get('tag') is not None:
            tag = [TagNameAndIdInput.from_dictionary(x) for x in dictionary.get('tag')]
        favorite_for = None
        if dictionary.get('favoriteFor') is not None:
            favorite_for = [NameAndIdInput.from_dictionary(x) for x in dictionary.get('favoriteFor')]
        created_by = None
        if dictionary.get('createdBy') is not None:
            created_by = [NameAndIdInput.from_dictionary(x) for x in dictionary.get('createdBy')]
        last_modified_by = None
        if dictionary.get('lastModifiedBy') is not None:
            last_modified_by = [NameAndIdInput.from_dictionary(x) for x in dictionary.get('lastModifiedBy')]
        owned_by = None
        if dictionary.get('ownedBy') is not None:
            owned_by = [NameAndIdInput.from_dictionary(x) for x in dictionary.get('ownedBy')]

        # Return an object of this model
        return cls(mtype,
                   output_fields,
                   offset,
                   batch_size,
                   sort_by,
                   sort_order,
                   name_pattern,
                   fetch_id,
                   skip_id,
                   show_hidden,
                   auto_created,
                   access_level,
                   tag,
                   favorite_for,
                   created_by,
                   last_modified_by,
                   owned_by)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
from restapisdk.models.user_name_and_id_input import UserNameAndIDInput
class ApiRestV2GroupSearchRequest(object):

    """Implementation of the 'Api Rest V2 Group Search Request' model.

    Request payload for searching user groups by name, GUID, and other
    attributes.

    Attributes:
        name (string): Name of the user group.
        id (string): GUID of the group.
        display_name (string): A unique display name string for the user
            group, for example, Developer group.
        visibility (Visibility3Enum): Visibility of the group. DEFAULT makes
            a group visible to other users and user groups, and thus allows
            them to share objects.
        description (string): Description text for the group.
        privileges (list of PrivilegeEnum): A JSON array of privileges
            assigned to the group.
        groups (list of GroupNameAndIDInput): A JSON array of group names or
            GUIDs or both. When both are given then id is considered.
        users (list of UserNameAndIDInput): A JSON array of names of users or
            GUIDs of users or both. When both are given then id is considered.
        mtype (Type5Enum): Type of user group. LOCAL_GROUP indicates that the
            group is created locally in the ThoughtSpot system.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "id": 'id',
        "display_name": 'displayName',
        "visibility": 'visibility',
        "description": 'description',
        "privileges": 'privileges',
        "groups": 'groups',
        "users": 'users',
        "mtype": 'type'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 display_name=None,
                 visibility=None,
                 description=None,
                 privileges=None,
                 groups=None,
                 users=None,
                 mtype=None):
        """Constructor for the ApiRestV2GroupSearchRequest class"""
        # Initialize members of the class
        self.name = name
        self.id = id
        self.display_name = display_name
        self.visibility = visibility
        self.description = description
        self.privileges = privileges
        self.groups = groups
        self.users = users
        self.mtype = mtype

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        name = dictionary.get('name')
        id = dictionary.get('id')
        display_name = dictionary.get('displayName')
        visibility = dictionary.get('visibility')
        description = dictionary.get('description')
        privileges = dictionary.get('privileges')
        groups = None
        if dictionary.get('groups') is not None:
            groups = [GroupNameAndIDInput.from_dictionary(x) for x in dictionary.get('groups')]
        users = None
        if dictionary.get('users') is not None:
            users = [UserNameAndIDInput.from_dictionary(x) for x in dictionary.get('users')]
        mtype = dictionary.get('type')

        # Return an object of this model
        return cls(name,
                   id,
                   display_name,
                   visibility,
                   description,
                   privileges,
                   groups,
                   users,
                   mtype)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class TableTypes(object):

    """Implementation of the 'TableTypes' model.

    Describes a physical table by name, database, schema, and its columns.

    Attributes:
        name (string): Name of the table.
        db_name (string): Name of the database containing the table.
        schema_name (string): Name of the schema containing the table.
        column (list of object): Column definitions of the table.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "db_name": 'dbName',
        "schema_name": 'schemaName',
        "column": 'column'
    }

    def __init__(self,
                 name=None,
                 db_name=None,
                 schema_name=None,
                 column=None):
        """Constructor for the TableTypes class"""
        # Initialize members of the class
        self.name = name
        self.db_name = db_name
        self.schema_name = schema_name
        self.column = column

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        name = dictionary.get('name')
        db_name = dictionary.get('dbName')
        schema_name = dictionary.get('schemaName')
        column = dictionary.get('column')

        # Return an object of this model
        return cls(name,
                   db_name,
                   schema_name,
                   column)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.meta_object_input import MetaObjectInput
class ApiRestV2MetadataFavoriteUnassignRequest(object):

    """Implementation of the 'Api Rest V2 Metadata Favorite Unassign Request' model.

    Request payload for removing metadata objects from a user's favorites.

    Attributes:
        user_name (string): Name of the user.
        user_id (string): The GUID of the user.
        meta_object (list of MetaObjectInput): Metadata object details.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "meta_object": 'metaObject',
        "user_name": 'userName',
        "user_id": 'userId'
    }

    def __init__(self,
                 meta_object=None,
                 user_name=None,
                 user_id=None):
        """Constructor for the ApiRestV2MetadataFavoriteUnassignRequest class"""
        # Initialize members of the class
        self.user_name = user_name
        self.user_id = user_id
        self.meta_object = meta_object

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        meta_object = None
        if dictionary.get('metaObject') is not None:
            meta_object = [MetaObjectInput.from_dictionary(x) for x in dictionary.get('metaObject')]
        user_name = dictionary.get('userName')
        user_id = dictionary.get('userId')

        # Return an object of this model
        return cls(meta_object,
                   user_name,
                   user_id)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.logical_table_header import LogicalTableHeader
class CreateTableResponse(object):

    """Implementation of the 'CreateTableResponse' model.

    Response returned after creating a table: the logical table header and
    the GUID of the corresponding physical table.

    Attributes:
        logical_table_header (LogicalTableHeader): Header of the logical
            table created.
        physical_table_id (string): GUID of the physical table created.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "logical_table_header": 'logicalTableHeader',
        "physical_table_id": 'physicalTableId'
    }

    def __init__(self,
                 logical_table_header=None,
                 physical_table_id=None):
        """Constructor for the CreateTableResponse class"""
        # Initialize members of the class
        self.logical_table_header = logical_table_header
        self.physical_table_id = physical_table_id

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        logical_table_header = LogicalTableHeader.from_dictionary(dictionary.get('logicalTableHeader')) if dictionary.get('logicalTableHeader') else None
        physical_table_id = dictionary.get('physicalTableId')

        # Return an object of this model
        return cls(logical_table_header,
                   physical_table_id)

    @classmethod
    def validate(cls, val):
        """Validates value against class schema

        Args:
            val: the value to be validated

        Returns:
            boolean : if value is valid against schema.

        """
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.group_name_and_id_input import GroupNameAndIDInput
from restapisdk.models.user_name_and_id_input import UserNameAndIDInput
class ApiRestV2GroupUpdateRequest(object):
    """Request payload for updating a ThoughtSpot user group.

    Attributes:
        name (string): name of the user group.
        id (string): GUID of the group to update.
        display_name (string): unique display name, e.g. "Developer group".
        visibility (Visibility3Enum): 'DEFAULT' makes the group visible to
            other users and groups so objects can be shared with it.
        description (string): description text for the group.
        privileges (list of PrivilegeEnum): privileges assigned to the group.
        groups (list of GroupNameAndIDInput): parent groups by name or GUID;
            when both are given the id is considered.
        users (list of UserNameAndIDInput): member users by name or GUID;
            when both are given the id is considered.
        assigned_liveboards (list of string): liveboard ids to assign.
        mtype (Type5Enum): group type; 'LOCAL_GROUP' means the group is
            created locally in the ThoughtSpot system (JSON key 'type').
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "name": 'name',
        "id": 'id',
        "display_name": 'displayName',
        "visibility": 'visibility',
        "description": 'description',
        "privileges": 'privileges',
        "groups": 'groups',
        "users": 'users',
        "assigned_liveboards": 'assignedLiveboards',
        "mtype": 'type'
    }

    def __init__(self,
                 name=None,
                 id=None,
                 display_name=None,
                 visibility='DEFAULT',
                 description=None,
                 privileges=None,
                 groups=None,
                 users=None,
                 assigned_liveboards=None,
                 mtype='LOCAL_GROUP'):
        """Constructor for the ApiRestV2GroupUpdateRequest class"""
        self.name = name
        self.id = id
        self.display_name = display_name
        self.visibility = visibility
        self.description = description
        self.privileges = privileges
        self.groups = groups
        self.users = users
        self.assigned_liveboards = assigned_liveboards
        self.mtype = mtype

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API payload.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names (see ``_names``).

        Returns:
            ApiRestV2GroupUpdateRequest: populated model, or None when the
            input is None.
        """
        if dictionary is None:
            return None
        raw_groups = dictionary.get('groups')
        raw_users = dictionary.get('users')
        return cls(
            name=dictionary.get('name'),
            id=dictionary.get('id'),
            display_name=dictionary.get('displayName'),
            # Falsy values (missing/empty) fall back to the documented default.
            visibility=dictionary.get('visibility') or 'DEFAULT',
            description=dictionary.get('description'),
            privileges=dictionary.get('privileges'),
            groups=None if raw_groups is None else [GroupNameAndIDInput.from_dictionary(x) for x in raw_groups],
            users=None if raw_users is None else [UserNameAndIDInput.from_dictionary(x) for x in raw_users],
            assigned_liveboards=dictionary.get('assignedLiveboards'),
            mtype=dictionary.get('type') or 'LOCAL_GROUP')

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.group_name_and_id import GroupNameAndID
from restapisdk.models.liveboard_name_and_id import LiveboardNameAndID
from restapisdk.models.user_name_and_id import UserNameAndID
class GroupResponse(object):
    """API response model describing a ThoughtSpot user group.

    Attributes mirror the JSON payload; see ``_names`` for the key mapping.

    Attributes:
        name / display_name / id / visibility / description (string): basic
            identity fields of the group.
        privileges (list of string): privileges assigned to the group.
        groups (list of GroupNameAndID): groups this group is added to.
        users (list of UserNameAndID): user information by id or name.
        assigned_liveboards (list of LiveboardNameAndID): liveboards assigned
            to the group.
        user_group_content (object): opaque group content blob.
        tags (list of string): tags assigned to the group.
        is_deleted / is_hidden / is_external / is_deprecated / complete /
            is_system_principal (bool): status flags reported by the API.
        mtype (string): group type (JSON key 'type').
        parenttype (string): type of the parent object.
        group_idx / metadata_version / index_version / generation_num (int):
            internal bookkeeping counters.
        tenant_id (string): tenant id associated with the group.
        created / modified (float): creation / last-modification timestamps.
        modified_by / author / owner (UserNameAndID): related principals.
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "name": 'name',
        "display_name": 'displayName',
        "id": 'id',
        "visibility": 'visibility',
        "description": 'description',
        "privileges": 'privileges',
        "groups": 'groups',
        "users": 'users',
        "assigned_liveboards": 'assignedLiveboards',
        "user_group_content": 'userGroupContent',
        "tags": 'tags',
        "is_deleted": 'isDeleted',
        "is_hidden": 'isHidden',
        "is_external": 'isExternal',
        "is_deprecated": 'isDeprecated',
        "complete": 'complete',
        "is_system_principal": 'isSystemPrincipal',
        "mtype": 'type',
        "parenttype": 'parenttype',
        "group_idx": 'groupIdx',
        "metadata_version": 'metadataVersion',
        "tenant_id": 'tenantId',
        "index_version": 'indexVersion',
        "generation_num": 'generationNum',
        "created": 'created',
        "modified": 'modified',
        "modified_by": 'modifiedBy',
        "author": 'author',
        "owner": 'owner'
    }

    def __init__(self,
                 name=None,
                 display_name=None,
                 id=None,
                 visibility=None,
                 description=None,
                 privileges=None,
                 groups=None,
                 users=None,
                 assigned_liveboards=None,
                 user_group_content=None,
                 tags=None,
                 is_deleted=None,
                 is_hidden=None,
                 is_external=None,
                 is_deprecated=None,
                 complete=None,
                 is_system_principal=None,
                 mtype=None,
                 parenttype=None,
                 group_idx=None,
                 metadata_version=None,
                 tenant_id=None,
                 index_version=None,
                 generation_num=None,
                 created=None,
                 modified=None,
                 modified_by=None,
                 author=None,
                 owner=None):
        """Constructor for the GroupResponse class"""
        # Parameter names deliberately match the attribute names in ``_names``,
        # so every argument is stored under its own name.
        args = locals()
        for attr in self._names:
            setattr(self, attr, args[attr])

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names (see ``_names``).

        Returns:
            GroupResponse: populated model, or None when the input is None.
        """
        if dictionary is None:
            return None
        # Keys holding nested models are handled explicitly below; every other
        # key is a plain scalar/list copied straight from the payload.
        nested = ('groups', 'users', 'assigned_liveboards',
                  'modified_by', 'author', 'owner')
        kwargs = {attr: dictionary.get(key)
                  for attr, key in cls._names.items()
                  if attr not in nested}
        raw_groups = dictionary.get('groups')
        kwargs['groups'] = None if raw_groups is None else [GroupNameAndID.from_dictionary(x) for x in raw_groups]
        raw_users = dictionary.get('users')
        kwargs['users'] = None if raw_users is None else [UserNameAndID.from_dictionary(x) for x in raw_users]
        raw_boards = dictionary.get('assignedLiveboards')
        kwargs['assigned_liveboards'] = None if raw_boards is None else [LiveboardNameAndID.from_dictionary(x) for x in raw_boards]
        for attr, key in (('modified_by', 'modifiedBy'),
                          ('author', 'author'),
                          ('owner', 'owner')):
            raw = dictionary.get(key)
            kwargs[attr] = UserNameAndID.from_dictionary(raw) if raw else None
        return cls(**kwargs)

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class ApiRestV2MetadataHomeliveboardAssignRequest(object):
    """Request payload for assigning a home liveboard to a user.

    Attributes:
        user_name (string): name of the user.
        user_id (string): GUID of the user.
        liveboard_id (string): GUID of the liveboard.
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "user_name": 'userName',
        "user_id": 'userId',
        "liveboard_id": 'liveboardId'
    }

    def __init__(self, user_name=None, user_id=None, liveboard_id=None):
        """Constructor for the ApiRestV2MetadataHomeliveboardAssignRequest class"""
        self.user_name = user_name
        self.user_id = user_id
        self.liveboard_id = liveboard_id

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API payload.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names ('userName', 'userId', 'liveboardId').

        Returns:
            ApiRestV2MetadataHomeliveboardAssignRequest: populated model,
            or None when the input is None.
        """
        if dictionary is None:
            return None
        # Every field is a plain scalar, so the _names mapping drives the copy.
        return cls(**{attr: dictionary.get(key)
                      for attr, key in cls._names.items()})

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from restapisdk.models.meta_object_input import MetaObjectInput
class ApiRestV2MetadataFavoriteAssignRequest(object):
    """Request payload for marking metadata objects as a user's favorites.

    Attributes:
        meta_object (list of MetaObjectInput): metadata object details.
        user_name (string): name of the user.
        user_id (string): GUID of the user.
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "meta_object": 'metaObject',
        "user_name": 'userName',
        "user_id": 'userId'
    }

    def __init__(self, meta_object=None, user_name=None, user_id=None):
        """Constructor for the ApiRestV2MetadataFavoriteAssignRequest class"""
        self.meta_object = meta_object
        self.user_name = user_name
        self.user_id = user_id

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API payload.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names ('metaObject', 'userName', 'userId').

        Returns:
            ApiRestV2MetadataFavoriteAssignRequest: populated model, or None
            when the input is None.
        """
        if dictionary is None:
            return None
        raw_objects = dictionary.get('metaObject')
        return cls(
            meta_object=None if raw_objects is None else [MetaObjectInput.from_dictionary(x) for x in raw_objects],
            user_name=dictionary.get('userName'),
            user_id=dictionary.get('userId'))

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class LogicalTableHeader(object):
    """Header metadata describing a logical table.

    All fields are plain scalars copied verbatim from the API payload.

    Attributes:
        id / name (string): identifier and display name of the table.
        author / author_name / author_display_name (string): authoring
            principal's GUID, login name and display name.
        created / modified (float): creation / last-modification timestamps.
        modified_by (string): GUID of the last modifying principal.
        generation_num (int): internal generation counter.
        owner (string): GUID of the owning principal.
        deleted / hidden (bool): status flags.
        database / schema (string): physical location of the table.
        mtype (string): table type (JSON key 'type').
        sub_type (string): table sub-type (JSON key 'subType').
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "id": 'id',
        "name": 'name',
        "author": 'author',
        "author_name": 'authorName',
        "author_display_name": 'authorDisplayName',
        "created": 'created',
        "modified": 'modified',
        "modified_by": 'modifiedBy',
        "generation_num": 'generationNum',
        "owner": 'owner',
        "deleted": 'deleted',
        "hidden": 'hidden',
        "database": 'database',
        "schema": 'schema',
        "mtype": 'type',
        "sub_type": 'subType'
    }

    def __init__(self,
                 id=None,
                 name=None,
                 author=None,
                 author_name=None,
                 author_display_name=None,
                 created=None,
                 modified=None,
                 modified_by=None,
                 generation_num=None,
                 owner=None,
                 deleted=None,
                 hidden=None,
                 database=None,
                 schema=None,
                 mtype=None,
                 sub_type=None):
        """Constructor for the LogicalTableHeader class"""
        # Parameter names deliberately match the attribute names in ``_names``,
        # so every argument is stored under its own name.
        args = locals()
        for attr in self._names:
            setattr(self, attr, args[attr])

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names (see ``_names``).

        Returns:
            LogicalTableHeader: populated model, or None when the input
            is None.
        """
        if dictionary is None:
            return None
        # Every field is a plain scalar, so the _names mapping drives the copy.
        return cls(**{attr: dictionary.get(key)
                      for attr, key in cls._names.items()})

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class ApiRestV2MetadataDependencyRequest(object):
    """Request payload for querying dependents of metadata objects.

    Attributes:
        mtype (Type10Enum): type of the data object (JSON key 'type').
        id (list of string): GUIDs of the objects to query.
        batch_size (int): maximum number of batches to fetch per query; when
            omitted the cluster-configured value is used, and -1 fetches all
            dependent objects in a single query.
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "mtype": 'type',
        "id": 'id',
        "batch_size": 'batchSize'
    }

    def __init__(self, mtype=None, id=None, batch_size=None):
        """Constructor for the ApiRestV2MetadataDependencyRequest class"""
        self.mtype = mtype
        self.id = id
        self.batch_size = batch_size

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API payload.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names ('type', 'id', 'batchSize').

        Returns:
            ApiRestV2MetadataDependencyRequest: populated model, or None
            when the input is None.
        """
        if dictionary is None:
            return None
        # Every field is a plain scalar/list, so the _names mapping drives the copy.
        return cls(**{attr: dictionary.get(key)
                      for attr, key in cls._names.items()})

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
class ApiRestV2GroupAddprivilegeRequest(object):
    """Request payload for adding privileges to a user group.

    Attributes:
        name (string): name of the group.
        id (string): GUID of the group to query.
        privileges (list of PrivilegeEnum): privileges to add.
    """

    # Maps python attribute names to the JSON property names used by the API.
    _names = {
        "name": 'name',
        "id": 'id',
        "privileges": 'privileges'
    }

    def __init__(self, name=None, id=None, privileges=None):
        """Constructor for the ApiRestV2GroupAddprivilegeRequest class"""
        self.name = name
        self.id = id
        self.privileges = privileges

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API payload.

        Args:
            dictionary (dict): mapping whose keys match the API property
                names ('name', 'id', 'privileges').

        Returns:
            ApiRestV2GroupAddprivilegeRequest: populated model, or None
            when the input is None.
        """
        if dictionary is None:
            return None
        # Every field is a plain scalar/list, so the _names mapping drives the copy.
        return cls(**{attr: dictionary.get(key)
                      for attr, key in cls._names.items()})

    @classmethod
    def validate(cls, val):
        """Validate *val* against this model's JSON schema.

        Returns:
            boolean: True when *val* conforms to the schema.
        """
        # NOTE(review): SchemaValidatorWrapper, APIHelper and os are expected
        # to be imported by this module; they are not visible in this chunk.
        return SchemaValidatorWrapper.getValidator(APIHelper.get_schema_path(os.path.abspath(__file__))).is_valid(val)
from io import BytesIO
import os
import json
import pkgutil
import textwrap
from typing import Any, Dict, Iterable, List
from urllib.request import urlopen
import pandas as pd
# This is the tag in http://github.com/vega/vega-datasets from
# which the datasets in this repository are sourced.
SOURCE_TAG = "v1.29.0"
def _load_dataset_info() -> Dict[str, Dict[str, Any]]:
    """Assemble per-dataset metadata from the three packaged JSON files.

    Reads from the ``restart_datasets`` package:
        datasets.json        -- base info (filename, format) per dataset
        dataset_info.json    -- optional descriptions/references per dataset
        local_datasets.json  -- names of datasets bundled with the package

    Returns:
        dict mapping dataset name to its merged info dict, with an added
        ``is_local`` flag on every entry.
    """
    def _read(path: str) -> Dict[str, Any]:
        # pkgutil.get_data returns None when the resource cannot be found.
        payload = pkgutil.get_data("restart_datasets", path)
        if payload is None:
            raise ValueError(
                "Cannot locate package path restart_datasets:{}".format(path)
            )
        return json.loads(payload.decode())

    info = _read("datasets.json")
    extra = _read("dataset_info.json")
    bundled = _read("local_datasets.json")
    for name, entry in info.items():
        entry["is_local"] = name in bundled
    # NOTE(review): a description for a name absent from datasets.json raises
    # KeyError here, matching the original behavior.
    for name, details in extra.items():
        info[name].update(details)
    return info
class Dataset(object):
    """Class to load a particular dataset by name"""
    # NOTE: _instance_doc is a runtime format template rendered per-dataset in
    # _make_docstring(); doubled braces {{...}} survive str.format as literals.
    _instance_doc = """Loader for the {name} dataset.
    {data_description}
    {bundle_info}
    Dataset source: {url}
    Usage
    -----
    >>> from vega_datasets import data
    >>> {methodname} = data.{methodname}()
    >>> type({methodname})
    {return_type}
    Equivalently, you can use
    >>> {methodname} = data('{name}')
    To get the raw dataset rather than the dataframe, use
    >>> data_bytes = data.{methodname}.raw()
    >>> type(data_bytes)
    bytes
    To find the dataset url, use
    >>> data.{methodname}.url
    '{url}'
    {additional_docs}
    Attributes
    ----------
    filename : string
        The filename in which the dataset is stored
    url : string
        The full URL of the dataset at http://vega.github.io
    format : string
        The format of the dataset: usually one of {{'csv', 'tsv', 'json'}}
    pkg_filename : string
        The path to the local dataset within the vega_datasets package
    is_local : bool
        True if the dataset is available locally in the package
    filepath : string
        If is_local is True, the local file path to the dataset.
    {reference_info}
    """
    _additional_docs = ""
    _reference_info = """
    For information on this dataset, see https://github.com/vega/vega-datasets/
    """
    # Remote fallback location; SOURCE_TAG pins the upstream vega-datasets release.
    base_url = "https://cdn.jsdelivr.net/npm/vega-datasets@" + SOURCE_TAG + "/data/"
    # Loaded once at class-creation time (reads the three packaged JSON files).
    _dataset_info = _load_dataset_info()
    _pd_read_kwds = {} # type: Dict[str, Any]
    _return_type = pd.DataFrame
    @classmethod
    def init(cls, name: str) -> "Dataset":
        """Return an instance of this class or an appropriate subclass"""
        # Registry of direct subclasses keyed by their ``name`` attribute; a
        # name with a dedicated subclass gets that subclass, others get Dataset.
        # NOTE(review): no subclass in this file defines ``state``; the second
        # hasattr test looks vestigial -- confirm before removing.
        clsdict = {
            subcls.name: subcls
            for subcls in cls.__subclasses__()
            if hasattr(subcls, "name") or
            hasattr(subcls, "state")
        }
        return clsdict.get(name, cls)(name)
    def __init__(self, name: str):
        # Raises ValueError (via _infodict) for unknown dataset names.
        info = self._infodict(name)
        self.name = name
        # Attribute-safe form of the name, e.g. 'co-est2019' -> 'co_est2019'.
        self.methodname = name.replace("-", "_")
        self.filename = info["filename"]
        self.url = self.base_url + info["filename"]
        self.format = info["format"]
        self.pkg_filename = "_data/" + self.filename
        self.is_local = info["is_local"]
        self.description = info.get("description", None)
        self.references = info.get("references", None)
        # Render the per-instance docstring from the class template.
        self.__doc__ = self._make_docstring()
    def _make_docstring(self) -> str:
        """Render _instance_doc with this dataset's metadata."""
        info = self._infodict(self.name)
        # construct, indent, and line-wrap dataset description
        description = info.get("description", "")
        if not description:
            description = (
                "This dataset is described at " "https://github.com/vega/vega-datasets/"
            )
        wrapper = textwrap.TextWrapper(
            width=70, initial_indent="", subsequent_indent=4 * " "
        )
        description = "\n".join(wrapper.wrap(description))
        # construct, indent, and join references
        reflist = info.get("references", [])  # type: Iterable[str]
        reflist = (".. [{0}] ".format(i + 1) + ref for i, ref in enumerate(reflist))
        wrapper = textwrap.TextWrapper(
            width=70, initial_indent=4 * " ", subsequent_indent=7 * " "
        )
        reflist = ("\n".join(wrapper.wrap(ref)) for ref in reflist)
        references = "\n\n".join(reflist)  # type: str
        if references.strip():
            references = "References\n    ----------\n" + references
        # add information about bundling of data
        if self.is_local:
            bundle_info = (
                "This dataset is bundled with vega_datasets; "
                "it can be loaded without web access."
            )
        else:
            bundle_info = (
                "This dataset is not bundled with vega_datasets; "
                "it requires web access to load."
            )
        return self._instance_doc.format(
            additional_docs=self._additional_docs,
            data_description=description,
            reference_info=references,
            bundle_info=bundle_info,
            return_type=self._return_type,
            **self.__dict__
        )
    @classmethod
    def list_datasets(cls) -> List[str]:
        """Return a list of names of available datasets"""
        return sorted(cls._dataset_info.keys())
    @classmethod
    def list_local_datasets(cls) -> List[str]:
        """Return a list of names of datasets bundled locally with the package"""
        return sorted(
            name for name, info in cls._dataset_info.items() if info["is_local"]
        )
    @classmethod
    def _infodict(cls, name: str) -> Dict[str, str]:
        """load the info dictionary for the given name"""
        info = cls._dataset_info.get(name, None)
        if info is None:
            raise ValueError(
                "No such dataset {0} exists, "
                "use list_datasets() to get a list "
                "of available datasets.".format(name)
            )
        return info
    def raw(self, use_local: bool = True) -> bytes:
        """Load the raw dataset from remote URL or local file
        Parameters
        ----------
        use_local : boolean
            If True (default), then attempt to load the dataset locally. If
            False or if the dataset is not available locally, then load the
            data from an external URL.
        """
        if use_local and self.is_local:
            # pkgutil.get_data returns None when the bundled file is missing.
            out = pkgutil.get_data("restart_datasets", self.pkg_filename)
            if out is not None:
                return out
            raise ValueError(
                "Cannot locate package path restart_datasets:{}".format(
                    self.pkg_filename
                )
            )
        else:
            # Network fetch from the pinned CDN URL.
            return urlopen(self.url).read()
    def __call__(self, use_local: bool = True, **kwargs) -> pd.DataFrame:
        """Load and parse the dataset from remote URL or local file
        Parameters
        ----------
        use_local : boolean
            If True (default), then attempt to load the dataset locally. If
            False or if the dataset is not available locally, then load the
            data from an external URL.
        **kwargs :
            additional keyword arguments are passed to data parser (usually
            pd.read_csv or pd.read_json, depending on the format of the data
            source)
        Returns
        -------
        data :
            parsed data
        """
        datasource = BytesIO(self.raw(use_local=use_local))
        # Per-class reader defaults, overridable by the caller's kwargs.
        kwds = self._pd_read_kwds.copy()
        kwds.update(kwargs)
        # Dispatch to the pandas parser matching the declared format.
        if self.format == "json":
            return pd.read_json(datasource, **kwds)
        elif self.format == "csv":
            return pd.read_csv(datasource, **kwds)
        elif self.format == "tsv":
            kwds.setdefault("sep", "\t")
            return pd.read_csv(datasource, **kwds)
        elif self.format == "xls" or self.format == "xlsx":
            return pd.read_excel(datasource, **kwds)
        else:
            raise ValueError(
                "Unrecognized file format: {0}. "
                "Valid options are ['json', 'csv', 'xlsx', 'xls', 'tsv']."
                "".format(self.format)
            )
    @property
    def filepath(self) -> str:
        # Absolute path of the bundled copy; only meaningful for local datasets.
        if not self.is_local:
            raise ValueError("filepath is only valid for local datasets")
        else:
            return os.path.abspath(
                os.path.join(os.path.dirname(__file__), "_data", self.filename)
            )
# ---------------------------------------------------------------------------
# Per-dataset subclasses.  Each one only pins the packaged file ``name`` and
# optional pandas reader kwargs; Dataset.init() dispatches on ``name``.
# NOTE(review): FipsList and list1_2020 both register name 'list1_2020.xls';
# whichever class is defined later wins in Dataset.init's registry dict --
# confirm the duplicate is intentional.
# ---------------------------------------------------------------------------
class Census(Dataset):
    name = "co-est2019-alldata"
    # Census CSV ships in Latin-1, not UTF-8.
    _pd_read_kwds = {"encoding": "ISO-8859-1"}
class BLS(Dataset):
    name = "all_data_M_2019.xlsx"
    _pd_read_kwds = {}
class FipsList(Dataset):
    name = "list1_2020.xls"
    _pd_read_kwds = {}
class CovidSurge(Dataset):
    name = "covid-surge-who.xlsx"
    _pd_read_kwds = {}
class Georgia(Dataset):
    name = 'Georgia.csv'
    _pd_read_kwds = {}
class NorthCarolina(Dataset):
    name = 'NorthCarolina.csv'
    _pd_read_kwds = {}
class Wyoming(Dataset):
    name = 'Wyoming.csv'
    _pd_read_kwds = {}
class NewHampshire(Dataset):
    name = 'NewHampshire.csv'
    _pd_read_kwds = {}
class Wisconsin(Dataset):
    name = 'Wisconsin.csv'
    _pd_read_kwds = {}
class Minnesota(Dataset):
    name = 'Minnesota.csv'
    _pd_read_kwds = {}
class Maryland(Dataset):
    name = 'Maryland.csv'
    _pd_read_kwds = {}
class Pennsylvania(Dataset):
    name = 'Pennsylvania.csv'
    _pd_read_kwds = {}
class Texas(Dataset):
    name = 'Texas.csv'
    _pd_read_kwds = {}
class Florida(Dataset):
    name = 'Florida.csv'
    _pd_read_kwds = {}
class Michigan(Dataset):
    name = 'Michigan.csv'
    _pd_read_kwds = {}
class Hawaii(Dataset):
    name = 'Hawaii.csv'
    _pd_read_kwds = {}
class Tennessee(Dataset):
    name = 'Tennessee.csv'
    _pd_read_kwds = {}
class DistrictOfColumbia(Dataset):
    name = 'DistrictOfColumbia.csv'
    _pd_read_kwds = {}
class Arkansas(Dataset):
    name = 'Arkansas.csv'
    _pd_read_kwds = {}
class Ohio(Dataset):
    name = 'Ohio.csv'
    _pd_read_kwds = {}
class Nebraska(Dataset):
    name = 'Nebraska.csv'
    _pd_read_kwds = {}
class Montana(Dataset):
    name = 'Montana.csv'
    _pd_read_kwds = {}
class Indiana(Dataset):
    name = 'Indiana.csv'
    _pd_read_kwds = {}
class SouthDakota(Dataset):
    name = 'SouthDakota.csv'
    _pd_read_kwds = {}
class Mississippi(Dataset):
    name = 'Mississippi.csv'
    _pd_read_kwds = {}
class Counties(Dataset):
    name = 'counties.csv'
    _pd_read_kwds = {}
class Connecticut(Dataset):
    name = 'Connecticut.csv'
    _pd_read_kwds = {}
class Oklahoma(Dataset):
    name = 'Oklahoma.csv'
    _pd_read_kwds = {}
class Colorado(Dataset):
    name = 'Colorado.csv'
    _pd_read_kwds = {}
class Maine(Dataset):
    name = 'Maine.csv'
    _pd_read_kwds = {}
class SouthCarolina(Dataset):
    name = 'SouthCarolina.csv'
    _pd_read_kwds = {}
class list1_2020(Dataset):
    name = 'list1_2020.xls'
    _pd_read_kwds = {}
class NorthDakota(Dataset):
    name = 'NorthDakota.csv'
    _pd_read_kwds = {}
class Louisiana(Dataset):
    name = 'Louisiana.csv'
    _pd_read_kwds = {}
class Utah(Dataset):
    name = 'Utah.csv'
    _pd_read_kwds = {}
class Virginia(Dataset):
    name = 'Virginia.csv'
    _pd_read_kwds = {}
class NewYork(Dataset):
    name = 'NewYork.csv'
    _pd_read_kwds = {}
class NewMexico(Dataset):
    name = 'NewMexico.csv'
    _pd_read_kwds = {}
class Delaware(Dataset):
    name = 'Delaware.csv'
    _pd_read_kwds = {}
class Kentucky(Dataset):
    name = 'Kentucky.csv'
    _pd_read_kwds = {}
class Massachusetts(Dataset):
    name = 'Massachusetts.csv'
    _pd_read_kwds = {}
class Alabama(Dataset):
    name = 'Alabama.csv'
    _pd_read_kwds = {}
class Arizona(Dataset):
    name = 'Arizona.csv'
    _pd_read_kwds = {}
class California(Dataset):
    name = 'California.csv'
    _pd_read_kwds = {}
class Nevada(Dataset):
    name = 'Nevada.csv'
    _pd_read_kwds = {}
class Alaska(Dataset):
    name = 'Alaska.csv'
    _pd_read_kwds = {}
class Illinois(Dataset):
    name = 'Illinois.csv'
    _pd_read_kwds = {}
class Idaho(Dataset):
    name = 'Idaho.csv'
    _pd_read_kwds = {}
class Oregon(Dataset):
    name = 'Oregon.csv'
    _pd_read_kwds = {}
class Iowa(Dataset):
    name = 'Iowa.csv'
    _pd_read_kwds = {}
class Vermont(Dataset):
    name = 'Vermont.csv'
    _pd_read_kwds = {}
class WestVirginia(Dataset):
    name = 'WestVirginia.csv'
    _pd_read_kwds = {}
class Kansas(Dataset):
    name = 'Kansas.csv'
    _pd_read_kwds = {}
class NewJersey(Dataset):
    name = 'NewJersey.csv'
    _pd_read_kwds = {}
class Washington(Dataset):
    name = 'Washington.csv'
    _pd_read_kwds = {}
class Missouri(Dataset):
    name = 'Missouri.csv'
    _pd_read_kwds = {}
class RhodeIsland(Dataset):
    name = 'RhodeIsland.csv'
    _pd_read_kwds = {}
class DataLoader(object):
    """Load a dataset from a local file or remote URL.

    Datasets can be loaded in two equivalent ways.  Either call the loader
    object with the dataset name as a string:

    >>> from vega_datasets import data
    >>> df = data('iris')

    or use the attribute named after the dataset:

    >>> df = data.iris()

    Optional parameters
    -------------------
    return_raw : boolean
        If True, then return the raw string or bytes.
        If False (default), then return a pandas dataframe.
    use_local : boolean
        If True (default), then attempt to load the dataset locally. If
        False or if the dataset is not available locally, then load the
        data from an external URL.
    **kwargs :
        additional keyword arguments are passed to the pandas parsing function,
        either ``read_csv()`` or ``read_json()`` depending on the data format.
    """

    # Map attribute-safe names (dashes replaced by underscores) back to the
    # canonical dataset names known to Dataset.
    _datasets = {name.replace("-", "_"): name for name in Dataset.list_datasets()}

    def list_datasets(self):
        """Return the names of all available datasets."""
        return Dataset.list_datasets()

    def __call__(self, name, return_raw=False, use_local=True, **kwargs):
        """Load dataset *name*, as a dataframe or (if *return_raw*) raw bytes/str."""
        loader = getattr(self, name.replace("-", "_"))
        if not return_raw:
            return loader(use_local=use_local, **kwargs)
        return loader.raw(use_local=use_local, **kwargs)

    def __getattr__(self, dataset_name):
        """Resolve an attribute access to a Dataset instance."""
        if dataset_name not in self._datasets:
            raise AttributeError("No dataset named '{0}'".format(dataset_name))
        return Dataset.init(self._datasets[dataset_name])

    def __dir__(self):
        """Expose dataset names for interactive tab completion."""
        return list(self._datasets)
class LocalDataLoader(DataLoader):
    """Load only datasets that are bundled with the package (no download)."""

    # Restrict the name map to datasets that exist on disk.
    _datasets = {
        name.replace("-", "_"): name for name in Dataset.list_local_datasets()
    }

    def list_datasets(self):
        """Return the names of all locally available datasets."""
        return Dataset.list_local_datasets()

    def __getattr__(self, dataset_name):
        """Resolve ``dataset_name`` to a local Dataset instance.

        Raises
        ------
        ValueError
            If the dataset exists but is not available locally.
        AttributeError
            If no dataset of that name exists at all.
        """
        if dataset_name in self._datasets:
            return Dataset.init(self._datasets[dataset_name])
        elif dataset_name in DataLoader._datasets:
            # BUG FIX: the hint previously opened a ``...`` literal without
            # closing it, producing a malformed message; terminate it here.
            raise ValueError(
                "'{0}' dataset is not available locally. To "
                "download it, use ``vega_datasets.data.{0}()``"
                "".format(dataset_name)
            )
        else:
            raise AttributeError("No dataset named '{0}'".format(dataset_name))
[](https://mybinder.org/v2/gh/restartus/restart/master?filepath=nb%2F)
# COVID-19 Restart the World
Getting the world up and running again isn't going to be easy. This project is
a start at making that easier. Feel free to take pieces and contribute. While
we wait for an effective treatment or a vaccine, we need all these pieces to
save lives from the infection and get the economy back to work.
It has four major components:
1. Modeling the Need. Model the entire world of the COVID-19 response from the
epidemiological models, to the consumer confidence model and finally to the
supply chain models that estimate how much Personal Protective Equipment (PPE),
Test kits and other equipment is needed to make it work.
2. Providing the Material. This is so confusing that having a set of templates that
you can embed into any website to provide the latest training, the condensed
recommendation is critical. And then on the backend a marketplace that is easy
to setup so you can buy or download what you need in one step.
3. Changing Norms. No amount of equipment works without changing how people
work, live and play. This is a set of behavioral models and content that
works against the different segments that need to be protected. From children,
to the elderly to specific groups that are disproportionately affected, getting the
right message at the right time is key.
4. Community. Getting the entire network of public, private and non-profit
organizations working together.
## Table of Contents
1. [Conceptual Diagram](#conceptual-diagram)
2. [Project Management](#project-management)
3. [Directory Layout](#directory-layout)
4. [Versions and Releases](#versions-and-releases)
- [Release Points](#release-points)
- [Release Notes](#release-notes)
- [Excel Bug Notes](#excel-bug-notes)
5. [The Various Documents](#the-various-documents)
6. [Data Sources](#data-sources)
7. [Installation Guidelines](#mac-installation-guidelines)
- [Using Git LFS and XLTrail](#using-git-lfs-and-xltrail)
- [Gitpod.io](#gitpod-io)
8. [Other Repos](#other-repos)
9. [Release Schedule](#release-schedule)
10. [GitHub, XLTrail, and Git LFS](#github-xltrail-and-git-lfs)
11. [Notes on Using Excel](#notes-on-using-excel)
12. [Binder](#binder)
## Conceptual diagram
We are building a system that calculates from several classes a system that
looks like:

## Project Management
The overall project is managed with [Github
Projects](https://github.com/restartus/covid-projection/projects/1). The process
works this way:
1. We assign items to people for the weekly sprint. Check the board to see what
issues you own
2. The syntax of each issue is in brackets Estimated time in hours to complete
Item name (actual used), so for instance `[3] Insert new Class (2)` which
means it will
take 3 hours to complete and you've used 2.
3. When estimating times, we are using the Fibonacci series as a rough guide so
assign hours as when estimating as `0.5, 1, 2, 3, 5, 8, 13, 21` for how many
hours something will take.
4. We don't use that for much now but it is a good way to see how accurate
we are. You should try to turn your 30 minute announcement on and see how
long it takes.
## Directory Layout
The directory layout has a few major areas:
- [data](data). This is where all the raw data is kept. Right now, this uses Git
LFS so that we have version control and this works since the data sets are
relatively small at at most a few GB. You do need git lfs installed to read
this.
- [bin](bin). This is where the developer tools live. They are mainly a subset
ported from @richtong [richtong](https://github.com/richtong/src) . The
most important is install.sh which should install the development environment
for Mac (for sure), Linux (hopefully) and Windows is in development. Our
standard dev environment is Mac, so let @richtong know if you want to become
a maintainer for the other builds.
- [lib](lib). Used by bin, this gives a standard development environment with
some standard variables like SOURCE_DIR you can use everywhere
- [model](model). This is where the new V2 model lives
- [nb](nb). This is for experiments and is our poor man's Jupyter Hub for
notebooks
- We do have some Excel sheets at the top, there is technical debt to fix the
Github actions to pull data from below, but they are named files that you copy
in Athena sheets
## Installation and Usage
To install with pip:
```
pip install restart
```
Simple example of analysis for state of California:
```python3
from restart import RestartModel
restart = RestartModel(config_dir='restart', population='oes', state='California')
model = restart.model # this contains all the data now
```
## Versions and Releases
The main release scheme is to alternate between adding features (the v1, v3,...)
and then solving technical debt issues and solidifying things, you can think of
these a v1, v1.x, v2, v2.x, etc
Our first v1 models (codenamed Athena) are in the [excel](excel) these are
Excel spreadsheets and they have now stabilized with a series of 1.x releases.
All versions are kept there.
Our next generation or v2 models (codenamed Balsa) are the conversion to Python
and implement the Surge models once again and then will add additional
extensions. Most of this work lives in the Jupyter and src subdirectories.
Our v2.x models (codenamed Concrete) will be a technical catchup release where we
put in the CD/CI features
As with all semvar compliant systems, major versions v1, v2,... maintain the
same interface, that is they produce the same output and are called the same
## Release Points
The system release two spreadsheets right now as of v1.x at
[releases](https://github.com/restartus/restart/releases). These are right taken
from the files at the root and renamed appropriately. So when you want to do
- covid-who-surge-washington.xlsx. This is the copied latest file that is the
- large model for State of Washington including SOC
- covid-who-surge-single.xlsx. This is the
template is for a new client. It is not reentrant, so for each new client,
make a copy
- covid-who-surge-single-cook.xlsx. This is the first release that uses the
single for Cook county restaurants
## Release Notes
- v1.4.5 Removes the data table from main washington model and creates a county
only model
- v1.4.4 First cut of the stockpile model
- v1.4.2. This has fixes for disinfection calculation and introduction to a
single sheet models
- v1.3.1. Fixes the the washington surge and has the new york city surge
## Excel Bug Notes
If you put a data table inside the system, you will get a external content
error. To fix this, you should go to the Data tab and look at connections. This
is the place to remove external connections
[External](https://answers.microsoft.com/en-us/msoffice/forum/all/excel-for-mac-external-data-connections-have-been/03d3efa9-d540-4b00-8bc8-a06ddb7c4ea1)
## The Various Documents
- [README.md](README.md) You are reading this, the basic introduction
- [INTRODUCTION.md](INTRODUCTION.md). The model and how it works at a high level
- [RESEARCH.md](RESEARCH.md). Various call reports on new ideas
## Data sources
### Apple Mobility
A regularly published CSV on mobility data
[Apple](https://www.apple.com/covid19/mobility)
### Google Mobility
A regular data source from [Google](https://www.google.com/covid19/mobility/)
## The PowerBI cube
The supporting documents needed are mainly in PowerBI.
- [OCC Based Employment](https://azure.microsoft.com/email/?destination=https%3A%2F%2Fapp.powerbi.com%2FMobileRedirect.html%3Faction%3DOpenReport%26reportObjectId%3De9e58394-451a-429b-aed1-20ef6e317dc4%26ctid%3D1e355c04-e0a4-42ed-8e2d-7351591f0ef1%26groupObjectId%3Df2f0cf78-3695-4dd6-a6fd-cf2063d3195c%26OpenAppFromWindowsPCAndTablet%3Dfalse%26emailSource%3DReportInvitation&p=bT0xN2RlMjVkYy04ODg4LTQwYmYtOTJmYy1iNDEwODVlNDAzZDEmdT1hZW8mbD1Nb2JpbGVSZWRpcmVjdC5odG1s)
## Installation Guidelines
@richtong will shortly generate instructions for Windows machines, but here is
an outline of steps: First you need to install [Homebrew](https://brew.sh/) so
that you can automatically install the rest of the stuff. This is done with
Terminal and you have to run a machine
incantation. Just copy and paste in the next line
```shell
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
```
Now that you have done that, then run the following commands which installs the
right pieces for you:
```shell
brew install git git-lfs
# You want this in a dedicated directory
mkdir -p ~/ws
```
Now you need to create a logon at [GitHub](https://github.com) and then ask
@richtong for rights to get into this repo then find a nice place for the
forecast and run these commands to get all the latest spreadsheets directly into
your machine
### Using Git LFS and XLTrail
You will need to install Git LFS as the models are quite large at 200MB and up
with and it will really clog your machine:
```shell
git lfs install
# to get whatever version you need, just use the version number
git checkout v1.0
# to get the latest daily development release
```
Also we support the use of release tags, the current versions are:
- v1.0. This version went to the State of Washington for their surge mask
forecasting and is over at the Office of Financial Management. This contains
all the data in a single sheet and runs on Excel or Google Sheets.
- rich-dev. This is the development fork, please use with caution.
```shell
# assuming you put this into a directory called ws
cd ~/ws
git clone https://github.com/restartpartners/covid-forecast
# You will be asked to enter your user name and password
cd covid-forecast
# this says get big files like Excel spreadsheets
git lfs init
# pull means get the latest from the cloud
git pull
```
And that is all there is to it, rather than asking for one at a time. You can
then edit the sheets directly as often as you like.
```shell
# This gives you your own private copy so no one can mess with your stuff
# so if the branch can be any name but by convention it is typically your
# name and then a dash and then what you are working on
git checkout -b _matt_-_utah_
# This makes sure that the server knows about your private copy
# origin means the cloud
git push --set-upstream _matt_-_utah_ origin
# now make any changes that you want
# when you are done, you just commit the changes and then push it to the cloud
git add -A
git commit -m "I did some amazing things and other comments"
git push
# When you want others to see it, let @richtong know and he will merge it into
the base so others can see it
```
### Using Gitpod
You can start thing in Google Chrome or Firefox with an extension
## Other repos
The best way to work on this is to see what others are doing. In
[https://github.com/restartpartners](https://github.com/restartpartners)
so here are the certain ways of doing it which are forked:
- [NY Times](https://github.com/NYTimes/covid-19-data). This is the New York
  Times data repository
- [Datahub.io](https://github.com/datasets/covid-19). Time series data from datahub.io
- [Imperial College of London](https://github.com/ImperialCollegeLondon/covid19model) their specific
model
- [Reich](https://github.com/neherlab/covid19_scenarios). The summary and
visualization of all scenarios
## Release Schedule
### v1.4 Shipped
Add non-Washington cubes to the model. Create a separate sheet for
non-Washington that has the simplified model.
Enable:
1. Picking of different rows will be done by matching IDs rather than indexing
2. For a given class, you can select a PPE row and then give it a weight. That
3. The stretch goal. Patients will be added as a column so we can spread them
across the cubes
### v1.3.2 - Surge model with non-Washington sheet -- In development
Uses the same basic form, but we do not assume Washington population data
### v1.3.1 - Released - Washington Surge Model Corrected, NYC and Snohomish
This is the fixed model as the transition to Excel corrupted some cells and we
lost formulas.
The enhancements are:
1. At the end of the model, any pivot table can be inserted into the model and
it will calculate based on that. It also slices a county appropriately based
on the Washington Cube
2. The model now uses named ranges in Analysis 8 and 9 so just changing the
analysis is not just changing names rather than relinking absolute cell
references
3. Adds the NYC analysis as well at the bottom as well as Snohomish county and
it now uses a Pivottable and external data rather than copying it all into
the sheet, so this becomes more of analysis tool.
4. Also adds a checksum in Analysis 8 and on to make sure the additions are
correct. Note that if you put in a Level but not an Essential, you will have
issues. That is if no Essential is listed, it is not added to the total. That's
an easy way to exclude groups by the way.
5. This is on the way to generalization so if you want to change and to add new
analysis, copy down the latest Analysis and then change the formulas in the
SUMIFS after you define new range names, the range names are where you replace
the `N`with the number of the analysis
AnalysisNItems. All the items being managed
AnalysisNLevels. The protection levels. These are numeric and index off the
protection level table. Soon they will be a tuple. The levels can be a fraction
in which case it takes a percentage from the next level, so 4.2 means 20% from
level 5 and the rest from level 4
AnalysisNEssential. The degree of urgency to start. Arbitrarily, less than 4
means a non-essential, (aka a non-bootstrap of the economy worker)
AnalysisNLowLower. The lower bound of non-essential, >0
AnalysisNLowUpper. The upper bound, usually <4
AnalysisNHighLower. The lower bound of essentially, usually >=4
AnalysisNHighUpper. The upper bound, usually <=999
To change a pivot, make sure you have lots of rows below, more than what the new
pivot needs, the go to Pivot Analysis/Change Data Source.
Right now these are absolute paths, this still needs to get resolved how to make
this portable.
Then you have to relink all of the data out of the pivot table. This takes some
time as you cannot just copy and paste, but have to do a hard equal to get the
right extraction
The Pivot Table does not work with Hierarchical data, so in that case it is
probably better to either go to the work of changing the labels so they are or to
just copy the table in.
## v1.0 - Released - Washington State Surge Model (deprecated)
This is the model that went to the Washington State Office of Financial
Management and we will send updates as needed. It has the following features
(or bugs depending on how you look at it). Note that this model has a big bug,
the formulas were inadvertently deleted, so use 1.3.1 or later
- Washington State only. No splits nor is this a country model
- Five forecasts in one. NAICS-2, NAICS-6, SOC, Small Business only, Not
employed splits
- Depends on WHO EFST v1.2 surge to estimate healthcare needs augmented with
- DOH Tier 1-4 dates 14 April (not updated to latest) and some LNI rules for
Construction but not updated to latest phases
- Estimates conserved use
## Github, XLTrail, and Git LFS
Github and Excel spreadsheets are not really used much together, but as we are
going to have both Excel spreadsheets and real code integrated, this seems like
a good place to put it.
There are a few changes to your workflow when you are using this tool that is
different from just storing Excel on your machine and emailing models around or
having it in a shared dropbox:
1. The versions are taken care of for you. This repo uses [XL
Trail](https://xltrail.com) to monitor all the spreadsheet changes. It
generates a cell-by-cell comparison of what has actually changed.
2. Github keeps track of every revision, so you can have a different set of
models and these get tagged so you can make sure you are getting the right
model. This is independent of the filename of the model, so you can make sure
you are getting the right model at the right time.
3. It stores every copy of the model in it so you can always roll back and
recover a model that is way cool..
4. The final piece is Git LFS or Large File Storage, this makes it blazingly
fast to store even GB models (we do!) into the system
## Notes on using Excel
### Dealing with PivotTables
Here is are the difficult parts on a Mac. A Pivot table cannot be moved with
copy and paste, instead, you need to go to the Ribbon and view analyze and there
is an entry called `Move Pivottable` which lets you move it.
When you select into a Pivot Table, you get a reference that is a cell number,
you get a named reference that looks like `PIVOTTABLE`. This works really well
for anything that is developed that has the same number of rows which is great.
### Excel does not like Google Sheets
There is some sort of bug where Excel does not like certain formulas in Google
Sheets so it deleted all the formulas that were using the Sumproduct. So this
formula needed to be recreated on the Excel side and we should not use Google
Sheets as a result.
## The resource formula
The key formula in the spreadsheet is the that takes the level of protection and
multiplies it against the row that is at the PPE level
The first formula only handled discrete rows like, so it indexes against a fixed
protection at G7:Q13 and then indexes into it with the D365 which is the
protection level. We add one because we index at 0. Then we calculate what the
column. Then multiply by the population
```excel
=@INDEX($G$7:$Q$13,$D365+1,COLUMN(H365)-COLUMN($G343)+1)*$E365
```
## Handling blending of rows
In many cases, a particular industry or job classification does not fit into any
one category, so we use sumproduct to figure this out. The key is to find the
region and then spread the data
So this first calculation gives you the two rows
```excel
=sumproduct(_two columns in the use matrix_, _the percentage split between the
two_)
```
The way that you collect the sum is by using the trick that modulo 1 gives you a
fraction so mod(3.4, 1) is 0.4 :
This is where the spreadsheet broke because Google Sheets and Excel,
```excel
{MOD(protection,1),
```
Now this gives the weight average use and then you just multiply by the
population and you are done
```excel
= sumproduct * population
```
One problem with
[Sumproduct](https://blog.udemy.com/excel-sumproduct/?utm_source=adwords&utm_medium=udemyads&utm_campaign=DSA_Catchall_la.EN_cc.US&utm_content=deal4584&utm_term=_._ag_95911180068_._ad_436653296108_._kw__._de_c_._dm__._pl__._ti_dsa-841699839063_._li_1027744_._pd__._&matchtype=b&gclid=EAIaIQobChMI-Leli9jD6QIV9Al9Ch3BXgn-EAAYASAAEgLSJfD_BwE) is that it does not like the vectors to be of
different shapes, so you can't do `sumproduct({1, 0}, {1 ; 0}), it needs both to
be row vectors.
In another note when used with a
[Boolean](https://exceljet.net/excel-functions/excel-sumproduct-function), you
can use it if you do a double negative to coerce TRUE/FALSE into a number
```excel
sumproduct( --(A2:A6="TX"), B2+B6)
```
will only add numbers where the A column has the string "TX" in it. So you need to
[transpose](https://support.office.com/en-us/article/transpose-function-ed039415-ed8a-4a81-93e9-4b6dfac76027) them first.
So the key formula looks like this where $J$7:$T$13 is the table and $D51 is the
index into it. Note that it automatically rounds down. Then the column
calculation makes sure you get the correct column starting from J, finally you
want the row below and then the trick is to transpose the next values.
This gets rid of the need to use the parentheses notation which might not be
that portable. This is just a simple function now. were E51 has 1-mod(d51) or
the amount for rounddown(e51,0) and mod(d51) is the fraction above.
```excel
=SUMPRODUCT(OFFSET($J$7:$T$13,$D51,COLUMN(J:J)-COLUMN($J:$J),2,1),TRANSPOSE($E51:$F51))*$G51
```
## Guards on the SUMIFS
The next complicated formula relies on ranges and does the summing. The main
trick here is that it uses SUMIFS as a conditional and you need to have a
protection level one greater at the end of each, so there is a mythical "7" or
N+1. It made construction of the model very neat as a result.
## Automatically deployment
[xltrail](https://www.xltrail.com/blog/how-to-manage-and-release-excel-files-on-github-part2)
has a great explanation of how to make this work but see .github/workflow for
additional files.
The main trick here is the need to add a step to checkout the right Excel
spreadsheet.
To make the deployment work, there is a named file, currently
covid-surge-who.xlsx which you need to copy the latest model into. Do not
symlink this as git lfs will get confused on the build
## Automated Testing
You can use [XLWings](https://docs.xlwings.org/en/stable/installation.html) to
run an Excel application from Mac or PC. This uses a PIP package to control
Excel.
Since GitHub Actions allows runners with Windows, you could theoretically start
a Windows machine, load Excel and run it with the Python to do testing. Man that
seems complicated though.
Another approach might be to take models which are compatible with Google Sheets
and push the model into Google Drive and drive it with Javascript
## Mobility Modeling
We need a way to model economic behavior and mobility.
## Recoded Python model Studies
This takes the study done with Jupyter notebooks and turns it into code as a
demo:
## Model 0.0 feasibility of using Streamlit
This is the visual display demo using streamlit. Filled with dummy data
- [streamlit](streamlit) experiments with using Streamlit for web display.
- [model0/dashboard0.py](model0/dashboard0.py) is the spaghetti code that is the first implementation of the
model. Retain for testing purposes
- [model](model) The current v2 model
- [logging](logging). Experiments in building a robust logging system that works
across streamlit and command line
- [altair](altair). Studies of using the Altair plotting interface to Vegas-lite
- [yaml](yaml). Experiments in using YAML as model input
- [namespace](namespace). Testing of multiple packages in the same namespace
- [iterator](iterator). Learn how to Model to iterate against all Base classes
inside of it
## Model that is the real python model first using streamlit as a demo v0.1
- [model0/dashboard.py[(model0/dashboard.py). This is not yet complete but
implements the class model described in the readme
- [README.ipynb](README.ipynb). This is the main read me that describes how the
module works. This is best read by starting [colab](https://colab.research.google.com)
opening but it describes the equations and has the test code for the model.
The main thing that it does is to make the variable names easy to understand.
## The real code for the Python model for v2.x
The other files follow the standard Python scheme and is ready for docstring
documentation
- [src](src) the source for the python. it's on the floor right now.
- [doc](doc) when we get documentation working we are using makedocs using
docstring as the production tool.
## Note we are using [gravizo.com](https://gravizo.com) to render graphs with
graphviz and test. It actually supports DOT, PlantUML and UML Graph as well as
SVG so really useful for illustrations that are not just dumb graphics as
explained by @tlmak0 at https://github.com/tlmak0/gravizo. The way it works is
that you pass gravizo.com the URL of the README or whatever file, it will then
parse it looking for Graphviz or other commands. It works because you set a
magic tag which must be unique in the text for it to find
The main tools you need here are the raw file link which you can get by looking
at [github.com](https://help.data.world/hc/en-us/articles/115006300048-GitHub-how-to-find-the-sharable-download-URL-for-files-on-GitHub)
and clicking on the `raw` button for a file and then put it
through https://urlencoder.org to get the percent-encoding also call the URL
encode.
Although the gravizo.com site shows an easier way with a direct embed but this
[no longer](https://gist.github.com/svenevs/ce05761128e240e27883e3372ccd4ecd)
works with github. Which is sad because the only way the indirect method works
is for public repos since private repos require an authentication key.
Most of the actual work is kept in a Jupyter Notebook
[README.ipynb](README.ipynb) points to the latest one. You can launch from
[colab](https://colab.research.google.com) to view it or you can see it statically
rendered on [github](https://github.com)
## Why no scientific notation
The most confusing part about this model are the many parameters. We use
Einstein summations and the working model is in [README.ipynb](README.ipynb).
Note that github markdown does not support Latex, so you have to use a
[hack](https://gist.github.com/a-rodin/fef3f543412d6e1ec5b6cf55bf197d7b) to
display it properly by using an image call, so we just remove this from this
readme, otherwise it tracks the Jupyter Notebook but without the scientific
notation.
## Class Structure
The main components of the v2 model are in a diagram

<details>
<summary></summary>
custom_mark
digraph "Class Model" {
node [shape=box]
subgraph Pop_class {
style=filled
P [label="Population, Essentiality"]
}
D [label=Disease]
P -> D [label="Social Mobility"]
D -> P [label=Patients]
E [label=Economy]
P -> E [label="Stage, Economic Activity"]
E -> P [label="GDP, Employment"]
subgraph Res {
R [label=Resource]
R -> P [label=Delivery]
P -> R [label=Demand]
I [label=Inventory]
R -> I [label=Fill]
I -> R [label=Use]
}
S [label=Supply]
R -> S [label="Sales Order"]
S -> R [label=Fulfillment]
}
custom_mark
</details>
## The main classes (deprecated)
You should see the Jupyter notebook for complete documentation
Then each major module can be subclassed from this and your can replace it. The
current list of modules are and in the notation, the class name is the major
part of the variable. Note that is copied from Jupyter so you will see $Latex$
formula for those of you who can read that, otherwise you can ignore it.
## Model Class
Model or `model`. This is the core framework. It holds the dimensions and
pointers to all the module component instances. It's main use is to "dimension"
the problem so that the other subclasses know the size of the problem. It also
holds points to the entire "graph" of objects (similar to a Keras object in
machine learning. So the process is to add "layers" or model elements, then run
the model as needed with a "solver" that is used for specific purposes and later
for optimization of an objective function.
The objects in the world which has a single character $symbol$ or a 3-4
character `_short_name_` name and then some other facts
## Population Class
Population as or `pop`. This holds the population and details about it. It's
main output are twofold with a set of variables that are 'inside' Population
- $P_{pd}$ or `Population.attr_pd[p, d] for p Populations with d details on
each such as number of covid patients or number of runs per day
- $P^R_{pn}$ or `Population.to_res_pn[p, n]`. This is a given populations use
of all n resources and is the per capita, per day figure.
- $P^T_{pn}$ or `Population.total_pn[p, n]`. This is the total population usage
- $P^{LC}_{en}$ or `Population.level_cost_en[e, n]`. for every essentiality
level, what is the cost for each resource n.
- $P^{LT}_{en}$ or `Population.level_total_cost_en[e, n]`. The total cost for
- every essential level for each resource
$P^{D}_{ln}$ or `Population.demand_ln`. This is the conversion from essential
levels to items used where l is the number of levels and n is the number of
resources. It is the core burn rate analysis
- $P^{L}_{p,l}$ or `Population.to_level_pl[p, l]`. Converts a population into
levels of essentiality and use Finally there are various properties that these
objects can have. these are handles as superscripts in the formula notation or
as the second word in the code as snake_case.
- Burn rate $B$ or `burn` which is the use per day per person of a resource
- Total units $U$ or `total` which is the total needed for an entire population
- Cost $C$ or `cost`. The cost per unit
- Total $T$ or `total_cost`. The total for an entire population
- Summary Level of Essentiality `level`. The population summarized by summary
levels
that are used to restart the economy in phases and stop it the same way
- Demand $D$ or `demand`. This calculates the burn rates or usage of the
products. In the surge model these are done per person per day which
generates an lxn matrix
## Resource class
Resource `res`. The resources needed, it takes demand from Population and
returns to the population what can actually be supplied.
- $R_{na}$ or `Resource.attr_na[n, a]` were Resource data for n items with a
attributes (like it's volume and cost), by convention, the first column has
a one in it for easy matrix multiplication
- Supply $S$ or `supp`. The sources of supply and input of resources
- Inventory $I$ or `inv`. What is currently on hand
## Economy Class
Economy $E$ or `econ`. This is a model of the economy that takes in the
Population and the degree of economic activity and returns the GDP and
employment and other measures of work
## Disease Class
Disease $D$ or `disease`. This models the progression of the disease and takes
in the population and social mobility and it returns the number of patients,
deaths and recovered patients.
But here are the major variables as a glossary and these usually have two forms,
the canonical dataframe and then an array form for tensors that are more than
two dimensions. In general, we operate on the array form and display with the df
form. The names change, but the first is for the surge model and the second for
the full range of data plus time series. And in colons are the class that
creates it
-
- Resource.attr_n_df (Resource.attr_na_df). Resource list main labels and it is all 1's in the surge model
then extends to the a attributes which are mainly things like volume.
- Population.attr_p (Population.attr_pd_df). The populations we are studying.
In this case, we are talking about d details including things like number of
COVID patients in each population.
- Demand.usage_res_ln_df (Demand.usc_res_dln_df). The Usage for each protection
level for a resource per capita
Then we have transformations that are typically a `to` attached:
- Population.to_usage_pl. Converts p populations to the l usage level
- Population.attr_p (Population.attr_pd). Population attributes with a 1 in the
first column always but then you can have others like covid patients
- Population.resource_pn. Resource needed per capita
## Binder
To get a url for a specific notebook, go [here](mybinder.org)
and enter in https://github.com/restartus/restart as the repo,
master as the branch, and the path to the notebook as the file path.
The url will be generated as you enter these fields.
Here is an example file path: nb/SAMPLE.ipynb
If a notebook uses voila and you want the url to launch with voila
immediately, change the type of "Path to a notebook file (optional)"
from file to url and enter "voila/render/<path-to-nb>".
| /restart-2.6.7.tar.gz/restart-2.6.7/README.md | 0.556159 | 0.952662 | README.md | pypi |
from typing import Optional
from restart.src.log import Log
from restart.src.model import Model
from restart.src.util import set_config
class RestartModel:
    """Bootstrap a model object in a notebook environment."""

    def __init__(
        self,
        population: str = "dict",
        organization: Optional[str] = None,
        csv: Optional[str] = None,
        county: Optional[str] = None,
        state: Optional[str] = None,
        subpop: Optional[str] = None,
        configdir: str = ".",
        output: Optional[str] = None,
        resource: str = "dict",
        inventory: str = "dict",
        demand: str = "dict",
        financial: str = "dict",
        mobility: str = "ensemble",
        epi: str = "imhe",
    ):
        """Initialize a model object."""
        # mirror every constructor argument onto the instance so that
        # set_model can read them back as attributes
        for attr_name, attr_value in locals().items():
            if attr_name == "self":
                continue
            setattr(self, attr_name, attr_value)
        # wire up logging for this bootstrap object
        self.name = "model"
        self.log_root = Log(self.name)
        self.log = self.log_root.log
        self.log.propagate = False
        self.log.debug(f"{__name__=}")
        # read the confuse configuration from the given directory
        self.config = set_config(configdir)
        self.set_model()

    def set_model(self, **kwargs):
        """Bootstrap the model."""
        # keyword arguments override the attributes captured in __init__
        for key, value in kwargs.items():
            setattr(self, key, value)
        # build the model through its fluent configuration interface
        self.model = (
            Model(self.name, log_root=self.log_root)
            .set_configure(self.config)
            .set_filter(
                county=self.county, state=self.state, subpop=self.subpop
            )
            .set_population(type=self.population)
            .set_organization(type=self.organization)
            .set_resource(type=self.resource)
            .set_inventory(type=self.inventory)
            .set_demand(type=self.demand)
            .set_financial(type=self.financial)
            .set_epi(type=self.epi)
            .set_mobility(type=self.mobility)
            .set_output(out=self.output, csv=self.csv)
        )
import datetime
import os
from pathlib import Path
from typing import Dict, Optional, Union
import confuse # type: ignore
import ipysheet # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
from IPython.display import display # type: ignore
def set_config(path: str):
    """Return a confuse Configuration rooted at *path*.

    Points the CONFIGDIR environment variable (which confuse consults to
    locate config.yaml) at the absolute form of *path* before building
    the configuration object.
    """
    os.environ["CONFIGDIR"] = os.path.abspath(path)
    config = confuse.Configuration("config")
    return config
def is_dir_or_file(name: str) -> bool:
    """Return True if *name* exists as a directory or a regular file.

    Args:
        name: Filesystem path to check.

    Returns:
        True when the path is an existing directory or file, else False.
    """
    # is_dir()/is_file() already return booleans, so the result can be
    # returned directly instead of branching on it
    path = Path(name)
    return path.is_dir() or path.is_file()
# sets the frame properly but does need to understand the model
# so goes into the model method
def set_dataframe(
    arr: np.ndarray,
    label: Optional[Dict],
    index: Optional[str] = None,
    columns: Optional[str] = None,
) -> pd.DataFrame:
    """Wrap *arr* in a DataFrame labelled from the model dictionary.

    The row labels come from ``label[index]`` and the column labels from
    ``label[columns]``; either is left unset when the corresponding key
    name (or the label dictionary itself) is None.
    """
    row_labels = None
    col_labels = None
    if label is not None:
        if index is not None:
            row_labels = label[index]
        if columns is not None:
            col_labels = label[columns]
    df = pd.DataFrame(arr, index=row_labels, columns=col_labels)
    # record the axis names so displays show which dimension is which
    df.index.name = index
    df.columns.name = columns
    return df
def load_dataframe(fname: str) -> pd.DataFrame:
    """Load an HDF5 file into a dataframe.

    The frame is expected to have been serialized under the key ``"df"``
    (the convention used throughout this package).

    Args:
        fname: Path of the h5 file to read.

    Returns:
        The dataframe serialized in the h5 file.
    """
    df: pd.DataFrame = pd.read_hdf(fname, "df")
    return df
def datetime_to_code(code: Union[str, datetime.datetime]) -> str:
    """Convert datetime objects to valid OCC codes.

    Gets around the problem of Excel automatically converting date-looking
    strings into datetime objects that can't be undone.

    Args:
        code: Either a datetime object or a string representing an OCC code

    Returns:
        The code in valid OCC code format, e.g. ``"5-2020"``
    """
    # isinstance also covers datetime subclasses such as pandas.Timestamp,
    # which the previous ``type(...) is`` identity check rejected
    if isinstance(code, datetime.datetime):
        return f"{code.month}-{code.year}"
    return str(code)
def to_df(sheet):
    """Convert an ipysheet sheet to a pandas DataFrame (shorthand)."""
    return ipysheet.pandas_loader.to_dataframe(sheet)
def to_sheet(df):
    """Convert a pandas DataFrame to an ipysheet sheet (shorthand)."""
    return ipysheet.pandas_loader.from_dataframe(df)
def format_cells(sheet, money=False):
    """Mark every cell read-only and apply a numeric display format.

    Args:
        sheet: An ipysheet sheet (anything exposing a ``cells`` iterable).
        money: When True use a dollar format, otherwise plain thousands.
    """
    # choose the format once instead of re-testing ``money`` per cell
    numeric_format = "$0,000" if money is True else "0,000"
    for cell in sheet.cells:
        # plain attribute assignment replaces the needless setattr calls
        cell.read_only = True
        cell.numeric_format = numeric_format
def format_population(sheet, money=False, round=False):
    """Generate a formatted sheet optimized for displaying population."""
    frame = to_df(sheet)
    if round:
        frame = frame.round()
    # surface the index as a leading "Population" column so the default
    # numeric row headers can be hidden below
    frame.insert(loc=0, column="Population", value=list(frame.index))
    styled = to_sheet(frame)
    format_cells(styled, money)
    styled.row_headers = False
    return styled
def display_population(sheet, money=False, round=False):
    """Display sheet with specific, population-optimized formatting."""
    display(format_population(sheet, money=money, round=round))
import math
import os
from typing import Dict, List, Optional, Tuple
import confuse # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
from .data import Data # type: ignore
from .filtermodel import Filter # type: ignore
from .load_csv import LoadCSV # type: ignore
from .log import Log # type: ignore
from .population import Population # type: ignore
from .util import datetime_to_code, load_dataframe # type: ignore
class PopulationOES(Population):
    """Transforms OES data into a format compatible with the model.

    Performs calculations to give us an estimate of population
    distributions on a county-wide basis.

    Attributes:
        oes_df: Dataframe containing OES data
        code_df: Dataframe containing conversions between county and MSA code
        pop_df: Dataframe containing census population data per county
        cty_name: Name of a US county
        state_name: Name of a US state
        df: The processed, OES data in a dataframe
    """

    def __init__(
        self,
        config: confuse.Configuration,
        filt: Filter,
        log_root: Optional[Log] = None,
    ):
        """Initialize.

        Read the paths in and create dataframes, generate mappings.

        Args:
            config: confuse configuration carrying the data paths
            filt: filter with the location and subpopulation choices
            log_root: optional shared logging root
        """
        super().__init__(config, log_root=log_root)
        self.log_root = log_root
        log = self.log
        log.debug(f"module {__name__=}")
        # get location and population from the filter
        self.location = filt.location
        try:
            if self.location["county"] is not None:
                # append the suffix used by the census/crosswalk tables
                self.location["county"] += " County"
        except KeyError:
            # BUG FIX: the f prefix was missing, so the braces were logged
            # literally instead of interpolating the bad location
            log.debug(f"invalid location input {self.location=}")
            return
        self.subpop = filt.subpop
        self.codes: list
        self.load_data(config, self.location)
    def load_data(self, config, location):
        """Do most of the initializing here.

        That way the stuff we don't want passed is hidden.

        Reads the four source tables (OES employment, MSA crosswalk,
        census population, excel model map), slices them down to the
        requested location and subpopulation, and publishes the results
        as Data objects on self.
        """
        # extract the dataframes we need from the input files
        # NOTE(review): if config is None the names oes_df/code_df/pop_df/
        # xls_df are never bound and the code below raises NameError --
        # presumably config is always supplied; verify against callers
        if config is not None:
            source = config["Paths"].get()
            source = LoadCSV(source=source).data
            oes_df = load_dataframe(
                os.path.join(source["Root"], source["OES"])
            )
            code_df = self.format_code(
                load_dataframe(os.path.join(source["Root"], source["CODE"]))
            )
            pop_df = load_dataframe(
                os.path.join(source["Root"], source["POP"])
            )
            xls_df = self.format_map(
                load_dataframe(os.path.join(source["Root"], source["XLS"]))
            )
        # initialize unsliced dataframe from oes data:
        # no location -> whole country, county+state -> county slice,
        # otherwise state slice
        if location["county"] is None and location["state"] is None:
            df = self.create_country_df(oes_df)
        elif location["county"] is not None and location["state"] is not None:
            # NOTE(review): this self-assignment is a no-op
            location["county"] = location["county"]
            df = self.create_county_df(location, oes_df, code_df, pop_df)
        else:
            df = self.create_state_df(location, oes_df)
        # filter the population down to the requested subpopulation
        if self.subpop == "healthcare":
            df = self.health_filter(df)
        elif self.subpop == "wa_tier2_opt1":
            df = self.wa_tier2_opt1_filter(df)
        elif self.subpop == "wa_tier2_opt2":
            df = self.wa_tier2_opt2_filter(df)
        # the actual data passed onto the model
        # (drop_code also stashes the OCC codes on self.codes)
        self.pop_detail_df = self.drop_code(df)
        self.population_pP_tr = Data(
            "population_pP_tr",
            config,
            log_root=self.log_root,
            p_index=list(self.pop_detail_df.index),
            P_index=["Size"],
            array=self.drop_code(df).to_numpy(),
        )
        # per-capita mapping from population to protection level
        pop_to_burn_df = self.pop_to_burn_rate(df, xls_df)
        self.pop_demand_per_unit_map_pd_um: Data = Data(
            "pop_demand_per_unit_map_pd_um",
            config,
            log_root=self.log_root,
            p_index=list(pop_to_burn_df.index),
            array=pop_to_burn_df.to_numpy(),
        )
        # essential/non-essential split for the summary level
        self.set_essential(xls_df, config)
        # detail_pd_arr = detail_pd_df["Size"].to_numpy()
        # self.pop_demand_per_unit_map_pd_um: Data = self.pop_to_burn_rate(
        #     df,
        #     map_df
        # )
        # map_labs, map_arr = self.create_map(df, map_df)
        # load into dictionary
        # df_dict = {}
        # df_dict["detail_pd_df"] = detail_pd_df
        # df_dict["detail_pd_arr"] = detail_pd_arr
        # df_dict["map_labs"] = map_labs
        # df_dict["map_arr"] = map_arr
def format_code(self, df: pd.DataFrame) -> pd.DataFrame:
"""Perform dataframe transformations specific to list1_2020.xls.
Args:
df: A dataframe
Returns:
A transformed dataframe to match the format needed for this project
"""
# Specify columns to bypass issues with underlining in original excel
df.columns = [
"CBSA Code",
"MDC Code",
"CSA Code",
"CBSA Title",
"Metropolitan/Micropolitan Statistical Area",
"Metropolitan Division Title",
"CSA Title",
"County Equivalent",
"State Name",
"FIPS State Code",
"FIPS County Code",
"Central/Outlying County",
]
# Select MSA, as this is what OES data is based off of
df = df[
df["Metropolitan/Micropolitan Statistical Area"]
== "Metropolitan Statistical Area"
]
# Drop data we don't need
df = df.drop(
[
"MDC Code",
"CSA Code",
"Metropolitan Division Title",
"Metropolitan/Micropolitan Statistical Area",
"CSA Title",
"FIPS State Code",
"FIPS County Code",
"Central/Outlying County",
],
axis=1,
)
# Reset indices for aesthetic appeal
df = df.reset_index(drop=True)
return df
    def format_map(self, df: pd.DataFrame) -> pd.DataFrame:
        """Manually slice the excel model to get protection level mappings.

        Args:
            df: The excel model loaded into a dataframe

        Returns:
            The dataframe sliced to give the mappings (Washington SOT,
            SOC code, type, protection level, essential ranking)
        """
        # manually redo indexing and select the rows we need
        # TODO: need new sheet that isn't precariously sliced like this
        # NOTE(review): these offsets are tied to one specific spreadsheet
        # layout and will silently break if the sheet changes shape
        DF_COLUMNS = 2528
        DF_START = 2529
        DF_END = 3303
        df.columns = df.iloc[DF_COLUMNS]
        df = df.iloc[DF_START:DF_END]
        df = df[
            ["Washington SOT", "SOC", "Type", "Level", "Essential (0 lowest)"]
        ]
        # fix datetime objects and drop empty rows
        # (excel turns codes like 5-2020 into dates; undo that)
        df["SOC"] = df["SOC"].apply(datetime_to_code)
        df = df.dropna(axis="rows").reset_index(drop=True)
        return df
def pop_to_burn_rate(
self, df: pd.DataFrame, map_df: pd.DataFrame
) -> Tuple[list, np.ndarray]:
"""Generate mappings for OCC codes and population levels.
Args:
df: A dataframe that has OCC codes
Returns:
Dictionary of the population level mappings
"""
map_arr = []
labels = []
for code in df["occ_code"]:
arr = np.zeros(7)
try:
ind = map_df[map_df["SOC"] == code].index[0]
level = map_df.iloc[ind]["Level"]
except IndexError:
if code.startswith("29-") or code.startswith("31-"):
level = 5.5
else:
level = 3
# assign integer levels
if type(level) is int:
arr[level] = 1
# assign multiple levels
else:
arr[math.floor(level)] = 0.5
arr[math.ceil(level)] = 0.5
# add to dictionary
name = df[df["occ_code"] == code].index.tolist()[0]
labels.append(name)
map_arr.append(arr)
pop_to_level_df = pd.DataFrame(map_arr, index=labels)
return pop_to_level_df
def find_code(self, location: Dict, code_df: pd.DataFrame) -> int:
"""Finds the MSA code of given county.
Args:
None
Returns:
Integer corresponding to the given county's MSA code
"""
if code_df is None:
raise ValueError(f"{code_df=} should not be None")
return int(
code_df[
(code_df["County Equivalent"] == location["county"])
& (code_df["State Name"] == location["state"])
]["CBSA Code"].iloc[0]
)
def calculate_proportions(
self,
code: int,
location: Dict,
code_df: pd.DataFrame,
pop_df: pd.DataFrame,
) -> float:
"""Calculate county proportion relative to total MSA pop.
Args:
code: MSA code for desired county
Returns:
A float corresponding to the ratio of the county's population in
relation to its MSA code.
"""
if code_df is None:
raise ValueError(f"{code_df=} should not be None")
if pop_df is None:
raise ValueError(f"{code_df=} should not be None")
# List the counties in the same MSA code as cty_name
counties = list(
code_df[code_df["CBSA Code"] == str(code)]["County Equivalent"]
)
# Construct dictionary mapping county names to constituent populations
populations = {}
for county in counties:
pop = int(
pop_df[
(pop_df["CTYNAME"] == county)
& (pop_df["STNAME"] == location["state"])
]["POPESTIMATE2019"]
)
populations[county] = pop
# Calculate total population in MSA code
total_pop = sum(populations.values())
# Divide individual county population by total MSA population
return populations[location["county"]] / total_pop
    def load_county(
        self,
        location: Dict,
        oes_df: pd.DataFrame,
        code_df: pd.DataFrame,
        pop_df: pd.DataFrame,
    ) -> Tuple[float, pd.DataFrame]:
        """Slice the OES data by county for further processing downstream.

        Args:
            location: Mapping with ``county`` and ``state`` entries
            oes_df: The full OES dataframe
            code_df: County-to-MSA crosswalk dataframe
            pop_df: Census population dataframe per county

        Returns:
            proportion: Float corresponding to proportion of residents from
                MSA code living in given county
            df: Sliced OES dataframe
        """
        # find county MSA CODE
        code = self.find_code(location, code_df)
        # calculate proportion of MSA code's residents living in county
        proportion = self.calculate_proportions(
            code, location, code_df, pop_df
        )
        # initialize dataframe as slice of OES data
        df = oes_df[oes_df["area"] == code][
            ["occ_code", "occ_title", "o_group", "tot_emp"]
        ]
        # replace "**" (BLS suppressed-value placeholder) with 0
        df = df.replace(to_replace="**", value=0)
        return proportion, df
def load_state(self, location: Dict, oes_df: pd.DataFrame) -> pd.DataFrame:
"""Slice the OES data by state for further processing downstream.
Args:
None
Returns:
df: Sliced OES dataframe
"""
# slice OES dataframe by state
col_list = ["occ_code", "occ_title", "o_group", "tot_emp"]
df = oes_df[(oes_df["area_title"] == location["state"])][col_list]
# replace placeholders with 0
df = df.replace(to_replace="**", value=0)
return df
def load_country(self, oes_df: pd.DataFrame) -> pd.DataFrame:
"""Get the OES data for the whole country.
The default setting for OES population
"""
# slice OES dataframe by the whole county
col_list = ["occ_code", "occ_title", "o_group", "tot_emp", "naics"]
df = oes_df[
(oes_df["area_title"] == "U.S.") & (oes_df["naics"] == "000000")
][col_list]
df = df.drop(["naics"], axis=1)
# replace placeholders with 0
df = df.replace(to_replace="**", value=0)
return df
def fill_uncounted(
self, major: pd.DataFrame, detailed: pd.DataFrame
) -> pd.DataFrame:
"""Create special categories for uncounted employees.
Args:
major: Dataframe containing totals for major OCC categories
detailed: Dataframe containing totals for detailed OCC categories
Returns:
The detailed dataframe with extra categories to account for
uncounted workers
"""
code_list = list(set(major["occ_code"]))
for code in code_list:
pat = code[0:3]
filt = detailed[detailed["occ_code"].str.startswith(pat)]
# Calculate number of employees unaccounted for within the major
# OCC code
total = int(major[major["occ_code"] == code]["tot_emp"])
det_total = np.sum(filt["tot_emp"])
delta = total - det_total
if delta > 0:
# create dataframe row and append to detailed dataframe
name = list(major[major["occ_code"] == code]["occ_title"])[0]
add_lst = [
[pat + "XXXX", "Uncounted " + name, "detailed", delta]
]
add_df = pd.DataFrame(add_lst, columns=list(major.columns))
detailed = detailed.append(add_df, ignore_index=True)
return detailed
def format_output(self, df: pd.DataFrame) -> pd.DataFrame:
"""Format dataframe to fit the model by dropping some columns.
Args:
df: The dataframe we want to format
Returns:
The formatted dataframe
"""
df = df.drop(df[df["tot_emp"] == 0].index)
df = df.drop(["o_group"], axis=1)
df = df.reset_index(drop=True)
return df
def drop_code(self, df: pd.DataFrame) -> pd.DataFrame:
"""Drop the OCC code from a dataframe.
So that it has the right format for the model.
"""
col_labs = ["Size"]
self.codes = list(df["occ_code"])
df = df.drop(["occ_code"], axis=1)
df.columns = col_labs
return df
    def create_county_df(
        self,
        location: Dict,
        oes_df: pd.DataFrame,
        code_df: pd.DataFrame,
        pop_df: pd.DataFrame,
    ) -> pd.DataFrame:
        """Generate dataframe containing processed OES data by county.

        Args:
            location: Mapping with ``county`` and ``state`` entries
            oes_df: The full OES dataframe
            code_df: County-to-MSA crosswalk dataframe
            pop_df: Census population dataframe per county

        Returns:
            The processed dataframe indexed by occupation title
        """
        # Load in sliced dataframe
        proportion, df = self.load_county(location, oes_df, code_df, pop_df)
        # Split into 'major' and 'detailed' OCC categories
        major = df[df["o_group"] == "major"].copy()
        detailed = df[df["o_group"] == "detailed"].copy()
        # Some detailed categories don't have information available - remove
        # these and place into "Uncounted" category
        detailed = self.fill_uncounted(major, detailed)
        # Adjust 'tot_emp' columns by MSA code proportion, since OES counts
        # whole metropolitan areas rather than single counties
        detailed["tot_emp"] = detailed["tot_emp"].apply(
            lambda x: int(x * proportion)
        )
        # Format to fit model
        detailed = self.format_output(detailed)
        detailed.set_index("occ_title", drop=True, inplace=True)
        return detailed
def create_state_df(
self, location: Dict, oes_df: pd.DataFrame
) -> pd.DataFrame:
"""Generate dataframe containing processed OES data by state.
Args:
None
Returns:
The processed dataframe
"""
# Load in sliced dataframe
df = self.load_state(location, oes_df)
major = df[df["o_group"] == "major"].copy()
detailed = df[df["o_group"] == "detailed"].copy()
# Some detailed categories don't have information available - remove
# these and place into "Uncounted" category
detailed = self.fill_uncounted(major, detailed)
# Format to fit model
detailed = self.format_output(detailed)
detailed.set_index("occ_title", drop=True, inplace=True)
return detailed
def create_country_df(self, oes_df: pd.DataFrame) -> pd.DataFrame:
"""Generate dataframe containing processed OES data for US.
Args:
oes_df: Dataframe containing OES data
Returns:
The processed dataframe
"""
df = self.load_country(oes_df)
major = df[df["o_group"] == "major"].copy()
detailed = df[df["o_group"] == "detailed"].copy()
detailed = self.fill_uncounted(major, detailed)
detailed = self.format_output(detailed)
detailed.set_index("occ_title", drop=True, inplace=True)
return detailed
def health_filter(self, df: pd.DataFrame) -> pd.DataFrame:
"""Return a detailed breakdown of healthcare workers with OCC codes.
Args:
None
Returns:
Dataframe object with the detailed breakdown
"""
# 29-XXXX and 31-XXXX are the healthcare worker codes
filt = df[
(df["occ_code"].str.startswith("29-"))
| (df["occ_code"].str.startswith("31-"))
]
return filt
def wa_tier2_opt1_filter(self, df: pd.DataFrame) -> pd.DataFrame:
"""Return a detailed breakdown of Washington tier 2 workers.
Args:
None
Returns:
Dataframe object with the detailed breakdown
"""
filt = df[
(df["occ_code"].str.startswith("33-"))
| (df["occ_code"].str.startswith("29-"))
| (df["occ_code"].str.startswith("31-"))
]
return filt
def wa_tier2_opt2_filter(self, df: pd.DataFrame) -> pd.DataFrame:
"""Return a detailed breakdown of Washington tier 2 workers.
Args:
None
Returns:
Dataframe object with the detailed breakdown
"""
occ_list = [
"29-1292",
"29-2040",
"29-1215",
"29-1126",
"29-1223",
"29-1181",
"29-1221",
"31-1120",
"31-1131",
"39-4031",
"31-1132",
"39-4011",
"31-1133",
"33-2011",
"31-9091",
"33-3012",
"33-3021",
"33-9093",
"33-3041",
"33-3051",
"33-3052",
"29-2052",
]
filt = df[df["occ_code"].isin(occ_list)]
return filt
def set_essential(self, df: pd.DataFrame, config) -> pd.DataFrame:
"""Get population essential levels from the excel model.
Manually slice the dataframe
"""
# df.columns = df.iloc[2528]
# df = df.iloc[2529:3303]
# df = df[["SOC", "Essential (0 lowest)"]]
pop_level: List = []
df["SOC"] = df["SOC"].apply(datetime_to_code)
df.reset_index(drop=True, inplace=True)
for code in list(self.codes):
arr = np.zeros(2)
try:
ind = df[df["SOC"] == code].index[0]
except IndexError:
ind = -1
if ind > 0:
level = df.iloc[ind]["Essential (0 lowest)"]
else:
level = np.random.randint(0, high=6)
if level >= 5:
arr[0] = 1
else:
arr[1] = 1
pop_level.append(arr)
self.pop_to_popsum1_per_unit_map_pp1_us = Data(
"pop_to_popsum1_per_unit_map_pp1_us",
config,
log_root=self.log_root,
p_index=list(self.pop_detail_df.index),
array=np.array(pop_level),
) | /restartus-2.5.0.1-py3-none-any.whl/restart/src/population_oes.py | 0.798894 | 0.363336 | population_oes.py | pypi |
from typing import Callable, Dict
import confuse # type: ignore
import yaml
from .base import Base # type: ignore
from .log import Log # type: ignore
class Output(Base):
    """Creates an output object.

    Supply a callback that provides each model element in turn, since we
    cannot pass the model itself: mutual imports fail, so Output uses a
    callback to walk all the elements of the model.
    http://effbot.org/pyfaq/how-can-i-have-modules-that-mutually-import-each-other.htm
    """
    def __init__(
        self,
        get_element: Callable,
        config: confuse.Configuration,
        log_root: Log = None,
        out: str = None,
        csv: str = None,
    ):
        """Do the initializing.

        Generates the yaml config file and the CSV output (each skipped
        when its path is None).

        Args:
            get_element: callback yielding (name, element) pairs
            config: the confuse configuration to serialize
            log_root: optional shared logging root
            out: path of the yaml file to write, or None to skip
            csv: path of the csv file to write, or None to skip
        """
        super().__init__(log_root=log_root)
        log = self.log
        log.debug(f"In {__name__}")
        self.config = config
        self.out = out
        self.csv = csv
        self.generate_config(get_element)
        self.write_csv()
    def generate_config(self, get_element: Callable):
        """Generate a new config yaml from the live model elements."""
        log = self.log
        new_config: Dict = {}
        for section in ["Parameter", "Dimension", "Paths", "Model"]:
            new_config[section] = self.config[section].get()
        log.debug(f"Wrote all config data into {new_config=}")
        # get the in-memory data read from csv's and elsewhere or computed;
        # note you cannot pass the model over so this is done via callback
        for key, value in get_element():
            # there are more in-memory objects than Model config entries,
            # so check for existence (e.g. filtering is in memory only)
            if key in new_config["Model"]:
                new_config["Model"][key]["array"] = value.array
        self.write_config(new_config)
    def write_config(self, config: Dict):
        """Writes config dict to a yaml file (no-op when self.out is None)."""
        if self.out is not None:
            with open(self.out, "w") as yamlfile:
                yaml.dump(config, yamlfile)
    def write_csv(self):
        """Writes to a CSV file (no-op when self.csv is None).

        NOTE(review): this reads self.demand and self.pop, which are never
        assigned anywhere in this class -- presumably a caller or subclass
        sets them before csv output is requested; verify before relying
        on the csv= parameter.
        """
        if self.csv is not None:
            df = self.demand.total_demand_pn_df.copy()
            # insert population into the dataframe
            pop = list(self.pop.detail_pd_df["Size"])
            df.insert(loc=0, column="Size", value=pop)
            df.to_csv(self.csv)
import logging
from typing import Dict, Optional, Tuple
import pandas as pd # type:ignore
from .log import Log # type: ignore
class BaseLog:
    """Logging is at the very bottom.

    Holds the root log object and a per-class child logger.
    """

    def __init__(self, log_root: Optional[Log] = None):
        """Set the Root Log.

        Falls back to the module logger when no root is supplied, since
        we have no log otherwise.
        """
        self.log_root = log_root
        if log_root is not None:
            self.log = log_root.log_class(self)
        else:
            self.log = logging.getLogger(__name__)
        self.log.debug(f"{self=}")
class Base(BaseLog):
    """Base for all model classes.

    Base strings and description: every subclass carries a description
    dictionary and can be iterated over its DataFrame attributes.
    """
    # do not put variable here unless you want them the same
    # across all classes see https://docs.python.org/3/tutorial/classes.html
    # https://stackoverflow.com/questions/9056957/correct-way-to-define-class-variables-in-python
    def __init__(self, log_root: Log = None):
        """Set base varabiles.

        Mainly the descriptions dictionary.
        """
        super().__init__(log_root=log_root)
        self.description: Dict = {}
    def set_description(self, name: str, description: str):
        """Set the variable description.

        The descriptions are carried in each class so they are self documenting
        May change this to centralized at some point.
        Gets rid of the equal sign if it is there from a f string
        Also only uses the last member name.

        Returns self for method chaining.
        """
        # we can't use a higher level logger
        log: logging.Logger = logging.getLogger(__name__)
        # https://stackoverflow.com/questions/18425225/getting-the-name-of-a-variable-as-a-string/58451182#58451182
        # Using Python 3.8 f strings
        # NOTE(review): this logs the *builtin* ``object`` -- leftover
        # debugging; it never shows the caller's variable
        log.debug(f"{object=}")
        # this doesn't work, we need the real object's name so has to happen in
        # caller
        # name = f'{object=}'.split('=')[0]
        # log.debug(f'set self.description[{name}]')
        # https://stackoverflow.com/questions/521502/how-to-get-the-concrete-class-name-as-a-string
        class_name = self.__class__.__name__
        # strip the f-string "=" suffix and keep only the last attribute
        # component, e.g. "self.foo=" -> "foo"
        name = name.split("=")[0].split(".")[-1]
        model_name = class_name + "." + name
        log.debug(f"{model_name=} {name=}")
        # log.debug(f'set model.description[{model_name}]')
        self.description[name] = description
        # method chaining
        return self
    def __iter__(self):
        """Iterate over all Pandas DataFrames.

        Snapshots the instance attributes that are DataFrames and resets
        the iteration cursor.
        """
        self.df_list = [
            k for k, v in vars(self).items() if isinstance(v, pd.DataFrame)
        ]
        self.df_len = len(self.df_list)
        self.df_index = 0
        return self
    def __next__(self) -> Tuple[str, pd.DataFrame]:
        """Next Pandas DataFrame.

        Returns an (attribute name, dataframe) pair until exhausted.

        Raises:
            StopIteration: when every dataframe has been yielded.
        """
        if self.df_index >= self.df_len:
            raise StopIteration
        key = self.df_list[self.df_index]
        value = vars(self)[key]
        self.df_index += 1
        return key, value
from typing import Optional
# Insert the classes of data we support here
import confuse # type: ignore
# Note that pip install data-science-types caused errors
from .base import Base # type: ignore
from .data import Data # type: ignore
from .log import Log # type: ignore
class Population(Base):
    """Population objects are created here.

    It has a default model in it for testing (the Bharat model); you
    should override it with a new child class.

    Population statistics and model for population.  Initially this
    contains population of p x 1.  Later it will be p x d where d are the
    detail columns -- for instance the number of covid patients, or the
    number of trips or visits or runs for a given population.

    The second matrix describes how to map the p populations to the l
    demand (protection) levels, giving a p x l matrix; long term this
    becomes d x p x l so that each detail d of the population can have a
    different run rate.

    How are resources consumed by different levels in a population?  This
    is the key first chart in the original model: it takes a set of l
    protection levels and, for each of n resources, provides their burn
    rate -- an l x n dataframe.  In this first version, burn rates are
    per capita, that is per person in a given level.

    In a later version, we will allow different "burn rates" by
    population attributes, so this becomes a 3 dimensional model.  For
    convenience, the Frame object retains objects in their simple
    dataframe form since it is easy to extract.  For multidimensional
    indices, we keep both the n-dimensional array (tensor) and a method
    to convert it to a multiindex for use by Pandas.

    There is a default model contained here for testing; you should
    override this by creating a child class and overriding the init.

    We also create a friendly name and long description as document
    strings; eventually this will become a data-description file we read
    in, but for now it is a dictionary.
    """
    def __init__(self, config: confuse.Configuration, log_root: Log = None):
        """Initialize all variables.

        All initialization here and uses type to determine which method to call
        The default is PopulationDict which reads from the model.data
        """
        # https://stackoverflow.com/questions/1385759/should-init-call-the-parent-classs-init/7059529
        # to pick up the description
        super().__init__(log_root=log_root)
        log = self.log
        log.debug("In %s", __name__)
        self.config = config
        # these need to be filled out by the subclasses;
        # defined here for type checking purposes
        # and so they exist on every subclass instance
        self.population_pP_tr: Optional[Data] = None
        self.pop_demand_per_unit_map_pd_um: Optional[Data] = None
        self.pop_to_popsum1_per_unit_map_pp1_us: Optional[Data] = None
from __future__ import annotations
# For slices of parameters
from enum import Enum
from typing import List
import confuse # type: ignore
import numpy as np # type: ignore
from .base import Base # type: ignore
from .data import Data # type: ignore
from .log import Log # type: ignore
# https://docs.python.org/3/library/enum.html
# These are the slices used
class InvParam(Enum):
    """List positions in Inventory Parameter List.

    Row indices into the iIp1n parameter array: initial stock (INIT),
    economic order quantity (EOQ) and minimum/safety stock (MIN).
    """
    INIT = 0
    EOQ = 1
    MIN = 2
class Inventory(Base):
    """Inventory - Manages all the inventorys that are used in the model."""
    def __init__(
        self,
        config: confuse.Configuration,
        log_root: Log = None,
    ):
        """Initialize the Inventorys.

        Reads every inventory table from the configuration -- current
        totals, the parameter block, and the initial / economic-order /
        minimum quantities, plus the helpers used for period-based
        calculations -- then seeds the stock at the initial level, which
        triggers a first supply order if that is below the safety minimum.
        """
        # initialize logging and description
        super().__init__(log_root=log_root)
        log = self.log
        self.config = config
        log.debug(f"in {__name__}")
        # current on-hand inventory totals
        self.inv_by_popsum1_total_rp1n_tc = Data(
            "inv_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # parameter block indexed by InvParam (INIT/EOQ/MIN)
        self.inv_by_popsum1_param_iIp1n_tp = Data(
            "inv_by_popsum1_param_iIp1n_tp", config, log_root=log_root
        )
        log.debug(f"{self.inv_by_popsum1_param_iIp1n_tp.df=}")
        # TODO: This should be taken from the param file
        self.inv_init_by_popsum1_total_rp1n_tc = Data(
            "inv_init_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        log.debug(f"set inv to {self.inv_init_by_popsum1_total_rp1n_tc=}")
        # economic order quantity: orders are rounded up to multiples of it
        self.inv_eoq_by_popsum1_total_rp1n_tc = Data(
            "inv_eoq_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # safety stock we try never to fall below
        self.inv_min_by_popsum1_total_rp1n_tc = Data(
            "inv_min_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # Helpers to handle period calculations
        self.inv_min_by_popsum1_in_periods_rp1n_pc = Data(
            "inv_min_by_popsum1_in_periods_rp1n_pc", config, log_root=log_root
        )
        self.inv_average_orders_by_popsum1_per_period_rp1n_uf = Data(
            "inv_average_orders_by_popsum1_per_period_rp1n_uf",
            config,
            log_root=log_root,
        )
        self.inv_order_by_popsum1_total_rp1n_tc = Data(
            "inv_order_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # can only set minimum once inv_min exists and order too
        self.set_min(self.inv_init_by_popsum1_total_rp1n_tc)
    def set_average_orders_per_period(
        self, inv_average_orders_by_popsum1_per_period_rp1n_uf: Data
    ):
        """Set Average Inventory Used Every Period.

        This could just be a simple set but leave here for clarity.

        Args:
            inv_average_orders_by_popsum1_per_period_rp1n_uf: the average
                demand drawn from inventory each period
        """
        self.inv_average_orders_by_popsum1_per_period_rp1n_uf = (
            inv_average_orders_by_popsum1_per_period_rp1n_uf
        )
    def set_min_in_periods(
        self,
        min_periods_r_pc: List,
    ) -> Inventory:
        """Sets the Minimum Inventory as measured in Average Days Shipments.

        A helper that broadcasts the per-resource period counts across the
        full (r, p1, n) array and then delegates to
        set_min_in_periods_array.

        Args:
            min_periods_r_pc: one period count per resource r

        Returns:
            self, for method chaining
        """
        log = self.log
        log.debug(f"{min_periods_r_pc=}")
        # broadcast the r-vector across the p1 and n axes via einsum
        self.inv_min_by_popsum1_in_periods_rp1n_pc.array = np.einsum(
            "r,rxn->rxn",
            min_periods_r_pc,
            np.ones_like(self.inv_min_by_popsum1_in_periods_rp1n_pc.array),
        )
        self.set_min_in_periods_array(
            self.inv_min_by_popsum1_in_periods_rp1n_pc
        )
        return self
    def set_min_in_periods_array(
        self,
        min_periods_rp1n_tc: Data,
    ) -> Inventory:
        """Set with an array that is for all resources in periods.

        Converts a per-period safety stock into absolute units by
        multiplying elementwise with the average orders per period, then
        applies it via set_min.

        Returns:
            self, for method chaining
        """
        log = self.log
        self.inv_min_by_popsum1_in_periods_rp1n_pc.array = (
            min_periods_rp1n_tc.array
        )
        log.debug(f"{self.inv_min_by_popsum1_in_periods_rp1n_pc.df=} ")
        # https://numpy.org/doc/stable/reference/generated/numpy.empty_like.html
        # note we need r=1 for this to work so we insert an empty dimension
        # https://numpy.org/doc/stable/reference/generated/numpy.expand_dims.html
        # needed this before we started calling with full range
        # self.inv_min_by_popsum1_per_period_rp1n_uc.array = np.expand_dims(
        #     self.inv_min_by_popsum1_per_period_rp1n_uc.array, axis=0
        # )
        # elementwise product: periods x (orders per period) -> units
        self.inv_min_by_popsum1_total_rp1n_tc.array = np.einsum(
            "rxn,rxn->rxn",
            min_periods_rp1n_tc.array,
            self.inv_average_orders_by_popsum1_per_period_rp1n_uf.array,
        )
        self.set_min(self.inv_min_by_popsum1_total_rp1n_tc)
        return self
    def set_min(self, min_by_popsum1_total_rp1n_tc: Data) -> Inventory:
        """Set the inventory level and reorder up to the safety minimum.

        NOTE(review): despite the name, this assigns the *current*
        inventory (inv_by_...), then forces a supply order in case we are
        below the minimum.

        Returns:
            self, for method chaining
        """
        log = self.log
        # https://stackoverflow.com/questions/53375161/use-numpy-array-to-replace-pandas-dataframe-values
        self.inv_by_popsum1_total_rp1n_tc.array = (
            min_by_popsum1_total_rp1n_tc.array
        )
        log.debug(f"{self.inv_by_popsum1_total_rp1n_tc.df=}")
        self.supply_order()
        return self
def supply_order(self) -> Inventory:
"""Order from supplier.
Order up to the minimum inventory
"""
# hack here because we only do ranges for min inventory
self.inv_order_by_popsum1_total_rp1n_tc.array = (
self.inv_min_by_popsum1_total_rp1n_tc.array
- self.inv_by_popsum1_total_rp1n_tc.array
)
# negative means we have inventory above safety levels
# so get rid of those
# https://www.w3inventory.com/python-exercises/numpy/python-numpy-exercise-90.php
self.inv_order_by_popsum1_total_rp1n_tc.array[
self.inv_order_by_popsum1_total_rp1n_tc.array < 0
] = 0
# now gross up the order to the economic order quantity
self.round_up_to_eoq(self.inv_order_by_popsum1_total_rp1n_tc)
self.log.debug(f"{self.inv_order_by_popsum1_total_rp1n_tc.df=}")
# now that we have an order rounded up and ready, let's get supply
self.fulfill(self.inv_order_by_popsum1_total_rp1n_tc)
return self
# https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python
    def round_up_to_eoq(self, order_by_popsum1_total_rp1n_tc: Data) -> np.ndarray:
        """Round each order up to a whole multiple of the economic order quantity.

        Pure function: the input Data object is not mutated. The rounded
        values are returned as an ndarray, and the caller is responsible
        for storing them.

        Args:
            order_by_popsum1_total_rp1n_tc: non-negative order quantities.

        Returns:
            ndarray with every entry rounded up to the next EOQ multiple
            (entries already on a multiple, including 0, are unchanged).

        Raises:
            ValueError: if any EOQ entry is non-positive or any order entry
                is negative.
        """
        if np.any(self.inv_eoq_by_popsum1_total_rp1n_tc.array <= 0):
            raise ValueError(
                f"EOQ not positive {self.inv_eoq_by_popsum1_total_rp1n_tc.df=}"
            )
        if np.any(order_by_popsum1_total_rp1n_tc.array < 0):
            raise ValueError(
                f"Negative order in {order_by_popsum1_total_rp1n_tc.df=}"
            )
        # Distance to the next EOQ multiple via modulo:
        # order + (eoq - order) % eoq leaves order unchanged when it is
        # already a multiple of eoq (the modulo is 0) and rounds up
        # otherwise, e.g. order=7, eoq=5 -> 7 + (-2 % 5) = 10.
        # https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python
        # https://softwareengineering.stackexchange.com/questions/225956/python-assert-vs-if-return
        # do not use asserts: they are stripped under -O, so raise errors
        return (
            order_by_popsum1_total_rp1n_tc.array
            + (
                self.inv_eoq_by_popsum1_total_rp1n_tc.array
                - order_by_popsum1_total_rp1n_tc.array
            )
            % self.inv_eoq_by_popsum1_total_rp1n_tc.array
        )
def fulfill(self, order_by_popsum1_total_rp1n_tc: Data):
"""Fulfill an order form supplier.
This is a stub in that all orders are immediatley fulfilled
"""
log = self.log
log.debug(f"fulfill {order_by_popsum1_total_rp1n_tc=}")
self.inv_by_popsum1_total_rp1n_tc.array += (
order_by_popsum1_total_rp1n_tc.array
)
log.debug(f"{self.inv_by_popsum1_total_rp1n_tc.df=}")
def order(self, order_by_popsum1_total_rp1n_tc: Data) -> Data:
"""Order by Customer from Inventory.
Take a new order and then return what you can
It will check what is in inventory and then call the delivery method
returns: whats available to ship
"""
# Return as much as we can so if the order is bigger than
# the inventory, just ship it all out.
# the simple min won't work, need an element0-wise minimum
# https://numpy.org/doc/stable/reference/generated/numpy.minimum.html
self.inv_order_by_popsum1_total_rp1n_tc.array = np.minimum(
order_by_popsum1_total_rp1n_tc.array,
self.inv_by_popsum1_total_rp1n_tc.array,
)
# ship it!
self.inv_by_popsum1_total_rp1n_tc.array -= (
self.inv_order_by_popsum1_total_rp1n_tc.array
)
# now restock
self.supply_order()
return self.inv_order_by_popsum1_total_rp1n_tc | /restartus-2.5.0.1-py3-none-any.whl/restart/src/inventory.py | 0.7586 | 0.272272 | inventory.py | pypi |
import os
from typing import Dict, List, Optional
import pandas as pd # type:ignore
from .load import Load # type: ignore
from .log import Log # type: ignore
class LoadCSV(Load):
    """Converts Excel and CSV files into dataframe objects.

    If you give it files with a .xlsx, .xls, or .csv extension, it will
    read their data into a dataframe, and then save the dataframe as
    an h5 file with extension .h5. If you feed this class
    an h5 file, it will simply pass through this class. This is
    done so that we can minimize the amount of times the Excel/CSV
    data must be processed - for larger files, it can be lengthy.

    Attributes:
        excel_ext: list of extensions attached to excel files
        csv_ext: list of extensions attached to csv files
        data: dictionary containing names of h5 files
    """

    def __init__(
        self,
        source: Optional[Dict] = None,
        log_root: Optional[Log] = None,
        excel_ext: Optional[List[str]] = None,
        csv_ext: Optional[List[str]] = None,
    ):
        """Initialize the Loader and convert the configured files.

        Args:
            source: mapping of logical names to file paths; must contain a
                "Root" key giving the base directory.
            log_root: optional shared logger root.
            excel_ext: extensions treated as Excel input
                (defaults to [".xlsx", ".xls"]).
            csv_ext: extensions treated as CSV input (defaults to [".csv"]).

        Raises:
            ValueError: if *source* is None or a file has an unsupported
                extension.
        """
        # logging setup
        super().__init__(log_root=log_root)
        log = self.log
        log.debug(f"{self.log=} {log=}")
        log.debug(f"module {__name__=}")
        # bug fix: list defaults were mutable default arguments (shared
        # across calls); use None sentinels and build fresh lists instead
        self.excel_ext = [".xlsx", ".xls"] if excel_ext is None else excel_ext
        self.csv_ext = [".csv"] if csv_ext is None else csv_ext
        if source is None:
            raise ValueError(f"{source=} should not be None")
        try:
            if source["Root"] is None:
                raise ValueError(f"need root directory in {source=}")
        except KeyError:
            # best-effort: missing "Root" leaves the object unconfigured
            log.debug(f"{source=} invalid config")
            return None
        # read all files in the given root directory
        files = os.listdir(source["Root"])
        rootdir = source["Root"]
        self.data: Dict = source
        for fname in source:
            # skip root key
            if not fname == "Root":
                path = source[fname]
                log.debug(f"{path=}")
                # split paths into name + extension
                base, ext = os.path.splitext(path)
                fullbase = os.path.join(rootdir, base)
                try:
                    # look for a previously converted h5 file in rootdir
                    if base + ".h5" in files:
                        # message fixed: the cache files are h5, not json
                        log.debug(f"preexisting h5 found for {base=}")
                        self.data[fname] = base + ".h5"
                    else:
                        log.debug(f"generating h5 file for {base=}")
                        # excel to dataframe
                        if ext in self.excel_ext:
                            log.debug(f"loading {ext=} file")
                            df = pd.read_excel(fullbase + ext)
                        # csv to dataframe
                        elif ext in self.csv_ext:
                            log.debug(f"loading {ext=} file")
                            df = pd.read_csv(fullbase + ext)
                        else:
                            raise ValueError(f"{fname=} extension invalid")
                        # store dataframe and overwrite dictionary input
                        self.store_dataframe(fullbase, df)
                        self.data[fname] = base + ".h5"
                # handle alternate utf encodings
                # NOTE(review): this fallback always uses read_csv, even if
                # the failing file was Excel — confirm that is intended
                except UnicodeDecodeError:
                    log.debug(f"loading {ext=} file with ISO-8859-1 encoding")
                    df = pd.read_csv(fullbase + ext, encoding="ISO-8859-1")
                    self.store_dataframe(fullbase, df)
                    self.data[fname] = base + ".h5"

    def store_dataframe(self, name: str, df: pd.DataFrame) -> None:
        """Serialize a dataframe in h5 format.

        Args:
            name: path of the file to save, without the ".h5" suffix.
            df: the dataframe to serialize (stored under key "df").

        Returns:
            None
        """
        log = self.log
        name = name + ".h5"
        log.debug(f"{name=}")
        df.to_hdf(name, key="df", mode="w")
        return None
from __future__ import annotations
from typing import Generator, List, Optional, Tuple
from .base import Base # type: ignore
from .demand import Demand # type: ignore
from .demand_dict import DemandDict # type: ignore
from .epi import Epi # type: ignore
from .epi_dict import EpiDict # type: ignore
from .epi_table import EpiTable # type: ignore
from .filtermodel import Filter # type: ignore
from .financial import Financial # type: ignore
from .financial_dict import FinancialDict # type: ignore
from .financial_table import FinancialTable # type: ignore
from .inventory import Inventory # type: ignore
from .inventory_dict import InventoryDict # type: ignore
from .log import Log # type: ignore
from .mobility import Mobility # type: ignore
from .mobility_dict import MobilityDict # type: ignore
from .mobility_table import MobilityTable # type: ignore
from .organization import Organization # type: ignore
from .organization_dict import OrganizationDict # type: ignore
from .output import Output # type: ignore
from .population import Population # type: ignore
from .population_dict import PopulationDict # type: ignore
from .population_oes import PopulationOES # type: ignore
from .population_wa import PopulationWA # type: ignore
from .resource_dict import ResourceDict # type: ignore
from .resourcemodel import Resource # type: ignore
class Model(Base):
    """Main model for planning.

    The Model is a single global data structure for the entire project:
    it holds pointers to every major model element and creates the correct
    tables for the main computation. Each element is produced by a
    chainable ``set_*`` method so the whole model can be configured in one
    fluent statement. The elements fall into three groups:

    - Real resources, like Populations or Organizations.
    - Transforms, which compute mappings (e.g. Population onto Resources).
    - Actions, which affect real objects, like Demand.

    Class layering:

    - LogBase. Logging mixin; every class that wants to log derives from it.
    - Base. Adds the descriptions; used to traverse the Model when printing
      or interrogating it.
    - Resource. Base class for every new way to read or manage resources.
    """

    # https://satran.in/b/python--dangerous-default-value-as-argument
    # do not use mutable default assignments; Python evaluates them once
    # https://docs.python.org/3/library/typing.html
    def __init__(self, name, log_root: Optional[Log] = None):
        """Initialize the model with a name and an optional logger root."""
        # https://stackoverflow.com/questions/1385759/should-init-call-the-parent-classs-init/7059529
        super().__init__(log_root=log_root)
        log = self.log
        log.debug(f"{__name__=}")
        self.name: str = name
        if not log.hasHandlers():
            print(f"{log=} has no handlers")
        log.debug(f"{self.name=}")

    def set_configure(self, config) -> Model:
        """Attach the loaded confuse configuration to the model."""
        log = self.log
        self.config = config
        log.debug(f"{self.config=}")
        return self

    # TODO: these setters are nearly identical and could be generated
    def set_population(self, type: str = None) -> Model:
        """Create the population class for the model.

        Raises:
            ValueError: when *type* names an unimplemented population model.
        """
        # the type selects the concrete Population subclass; filtering by
        # set_filter must already have happened for oes/wa
        self.population: Population
        self.filter: Filter
        if type == "oes":
            self.population = PopulationOES(
                self.config,
                self.filter,
                log_root=self.log_root,
            )
        elif type == "wa":
            self.population = PopulationWA(
                self.config, self.filter, log_root=self.log_root
            )
        elif type == "dict":
            # change this to the naming of columns
            self.population = PopulationDict(
                self.config,
                log_root=self.log_root,
            )
        else:
            raise ValueError(f"{type=} not implemented")
        return self

    def set_organization(self, type: str = None) -> Model:
        """Set the organization model (only "dict" is implemented)."""
        self.organization: Organization
        if type == "dict":
            self.organization = OrganizationDict(
                self.config, log_root=self.log_root
            )
        return self

    def set_resource(self, type: str = None) -> Model:
        """Create the resource class (only "dict" is implemented)."""
        self.resource: Resource
        if type == "dict":
            self.resource = ResourceDict(self.config, log_root=self.log_root)
        return self

    def set_inventory(self, type: str = None) -> Model:
        """Create inventory management for a specific warehouse."""
        self.inventory: Inventory
        if type == "dict":
            self.inventory = InventoryDict(self.config, log_root=self.log_root)
        return self

    def set_demand(self, type: str = None) -> Model:
        """Set demand by population levels.

        Raises:
            ValueError: for the not-yet-implemented "mitre" and "jhu" types.
        """
        log = self.log
        self.demand: Demand
        if type == "mitre":
            log.debug("Use Mitre demand")
            # bug fix: these two raises were missing the f-prefix, so the
            # literal "{type=}" text was shown instead of the actual type
            raise ValueError(f"{type=} not implemented")
        elif type == "jhu":
            log.debug("Use JHU burn rate model")
            raise ValueError(f"{type=} not implemented")
        else:
            log.debug("Use default yaml dictionary data")
            self.demand = DemandDict(
                self.config,
                res=self.resource,
                pop=self.population,
                log_root=self.log_root,
                type=type,
            )
        return self

    def set_filter(
        self, county: str = None, state: str = None, subpop: str = None
    ) -> Model:
        """Filter the model.

        Shrinks the model to the relevant population and resources.
        """
        self.filter = Filter(
            log_root=self.log_root,
            county=county,
            state=state,
            subpop=subpop,
        )
        return self

    def set_financial(self, type: str = None) -> Model:
        """Create the Financial model ("dict" or "table")."""
        log = self.log
        self.financial: Financial
        if type == "dict":
            self.financial = FinancialDict(
                self.config, log_root=self.log_root, type=type
            )
        elif type == "table":
            self.financial = FinancialTable(
                self.config, log_root=self.log_root, type=type
            )
        else:
            log.error(f"Financial Model {type=} not implemented")
        return self

    def set_epi(self, type: str = None) -> Model:
        """Create the Epi model ("dict" or one of the table-backed feeds)."""
        log = self.log
        self.epi: Epi
        if type == "dict":
            self.epi = EpiDict(self.config, log_root=self.log_root, type=type)
        elif type in [
            "ihme",
            "delphi",
            "icl",
            "lanl",
            "sikjalpha",
            "yyg",
            "chensemble",
        ]:
            self.epi = EpiTable(self.config, log_root=self.log_root, type=type)
        else:
            log.error(f"Epi Model {type=} not implemented")
        return self

    def set_mobility(self, type: str = None) -> Model:
        """Create the Mobility (behavior) model ("dict" or "table")."""
        log = self.log
        self.mobility: Mobility
        if type == "dict":
            self.mobility = MobilityDict(
                self.config, log_root=self.log_root, type=type
            )
        elif type == "table":
            self.mobility = MobilityTable(
                self.config, log_root=self.log_root, type=type
            )
        else:
            log.error("Behavior not implemented")
        return self

    # https://docs.python.org/3/library/typing.html#typing.Generator
    # Generator[yield type, send type, return type]; send/return are None
    def walk(self) -> Generator[Tuple[str, Base], None, None]:
        """Walk through all Base objects in the Model.

        Needed for things like Output which are called by Model and cannot
        mutually import it, so a generator is handed over instead.
        """
        log = self.log
        for name, value in self:
            log.debug(f"{name=} {value=}")
            yield name, value

    def set_output(self, out: str = None, csv: str = None) -> Model:
        """Generate output through the Output element."""
        self.output = Output(
            self.walk,
            config=self.config,
            log_root=self.log_root,
            out=out,
            csv=csv,
        )
        return self

    # https://thispointer.com/python-how-to-make-a-class-iterable-create-iterator-class-for-it/
    # the iterable is the set of Base attributes; Model itself is the
    # iterator that returns them one by one
    def __iter__(self) -> Model:
        """Iterate through the model getting only Base objects."""
        log = self.log
        self.base_list: List = [
            k for k, v in vars(self).items() if isinstance(v, Base)
        ]
        log.debug(f"{self.base_list=}")
        self.base_len: int = len(self.base_list)
        self.base_index: int = 0
        return self

    def __next__(self) -> Tuple[str, Base]:
        """Return the next (name, Base) pair."""
        log = self.log
        if self.base_index >= self.base_len:
            raise StopIteration
        log.debug(f"{self.base_index=}")
        key = self.base_list[self.base_index]
        value = vars(self)[key]
        log.debug(f"{key=} {value=}")
        self.base_index += 1
        return key, value

    # decorator/chaining pattern as used by Keras and others
    def set_logger(self, name: str = __name__) -> Model:
        """Set up the root logger and this model's log."""
        self.log_root = Log(name)
        self.log = self.log_root.log
        return self
from typing import Optional
from restart.src.log import Log
from restart.src.model import Model
from restart.src.util import set_config
class RestartModel:
    """Bootstrap a model object in a notebook environment."""

    def __init__(
        self,
        population: str = "dict",
        organization: Optional[str] = None,
        csv: Optional[str] = None,
        county: Optional[str] = None,
        state: Optional[str] = None,
        subpop: Optional[str] = None,
        configdir: str = ".",
        output: Optional[str] = None,
        resource: str = "dict",
        inventory: str = "dict",
        demand: str = "dict",
        financial: str = "dict",
        mobility: str = "ensemble",
        epi: str = "ihme",
    ):
        """Initialize a model object.

        Every keyword is stashed on the instance so :meth:`set_model` can
        rebuild the model with selective overrides.

        Bug fix: the epi default was the misspelled "imhe", which
        Model.set_epi rejects (it accepts "ihme"), so the default always
        hit the not-implemented error path.
        """
        # stash every constructor argument on the instance (skip self)
        for key, value in locals().items():
            if key != "self":
                setattr(self, key, value)
        # set the logging
        self.name = "model"
        self.log_root = Log(self.name)
        self.log = log = self.log_root.log
        log.propagate = False
        log.debug(f"{__name__=}")
        # set up the config
        self.config = set_config(configdir)
        self.set_model()

    def set_model(self, **kwargs):
        """Bootstrap (or rebuild) the model.

        Keyword arguments override the settings stored by ``__init__``
        before the model chain is rebuilt.
        """
        # override defaults with keywords
        for k, v in kwargs.items():
            setattr(self, k, v)
        # build the model
        # NOTE(review): the "ensemble" mobility default is not handled by
        # Model.set_mobility (dict/table only) — confirm it is intended
        self.model = (
            Model(self.name, log_root=self.log_root)
            .set_configure(self.config)
            .set_filter(
                county=self.county, state=self.state, subpop=self.subpop
            )
            .set_population(type=self.population)
            .set_organization(type=self.organization)
            .set_resource(type=self.resource)
            .set_inventory(type=self.inventory)
            .set_demand(type=self.demand)
            .set_financial(type=self.financial)
            .set_epi(type=self.epi)
            .set_mobility(type=self.mobility)
            .set_output(out=self.output, csv=self.csv)
        )
import datetime
import os
from pathlib import Path
from typing import Dict, Optional, Union
import confuse # type: ignore
import ipysheet # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
from IPython.display import display # type: ignore
def set_config(path: str):
    """Build a confuse configuration rooted at *path*.

    Exports CONFIGDIR (absolutized) so confuse picks the directory up,
    then returns the confuse Configuration object.
    """
    os.environ["CONFIGDIR"] = os.path.abspath(path)
    return confuse.Configuration("config")
def is_dir_or_file(name: str) -> bool:
    """Return True when *name* exists as a directory or a regular file.

    It's hard to believe this is not a function already.
    """
    target = Path(name)
    return target.is_dir() or target.is_file()
# sets the frame properly but does need to understand the model
# so goes into the model method
def set_dataframe(
    arr: np.ndarray,
    label: Optional[Dict],
    index: Optional[str] = None,
    columns: Optional[str] = None,
) -> pd.DataFrame:
    """Wrap *arr* in a DataFrame labeled from the model's label dictionary.

    Args:
        arr: the raw array of values.
        label: mapping from axis name to the list of labels for that axis.
        index: key in *label* naming the row axis (also set as index name).
        columns: key in *label* naming the column axis.

    Returns:
        DataFrame with labels applied where available; a missing axis key
        falls back to the default RangeIndex.
    """
    # bug fix: the original comment promised dict.get semantics ("if there
    # is no item it returns None") but used label[...] which raises
    # KeyError on a missing axis key; use .get to match the stated intent
    # https://www.tutorialspoint.com/python/dictionary_get.htm
    row_labels = (
        label.get(index) if label is not None and index is not None else None
    )
    col_labels = (
        label.get(columns)
        if label is not None and columns is not None
        else None
    )
    df = pd.DataFrame(arr, index=row_labels, columns=col_labels)
    df.index.name = index
    df.columns.name = columns
    return df
def load_dataframe(fname: str) -> pd.DataFrame:
    """Deserialize the dataframe stored in an h5 file.

    Args:
        fname: name of the h5 file (dataframe stored under key "df").

    Returns:
        The dataframe serialized in the h5 file.
    """
    return pd.read_hdf(fname, "df")
def datetime_to_code(code: Union[str, datetime.datetime]) -> str:
    """Convert datetime objects back into valid OCC codes.

    Works around Excel automatically converting date-looking strings
    (e.g. "5-2020") into datetime objects that can't be undone.

    Args:
        code: either a datetime object or a string representing an OCC code.

    Returns:
        The code in valid OCC code format ("month-year" for datetimes).
    """
    # deliberately an exact type check: datetime subclasses (such as a
    # pandas Timestamp) keep the plain str() path, as in the original
    if type(code) is not datetime.datetime:
        return str(code)
    return f"{code.month}-{code.year}"
def to_df(sheet):
    """Shorter function call for sheet -> df (ipysheet to pandas)."""
    return ipysheet.pandas_loader.to_dataframe(sheet)
def to_sheet(df):
    """Shorter function call for df -> sheet (pandas to ipysheet)."""
    return ipysheet.pandas_loader.from_dataframe(df)
def format_cells(sheet, money=False):
    """Mark every ipysheet cell read-only with a thousands numeric format.

    Args:
        sheet: an ipysheet sheet (anything exposing a ``cells`` iterable).
        money: when exactly True, prefix the format with a dollar sign.
    """
    numeric_format = "$0,000" if money is True else "0,000"
    for cell in sheet.cells:
        cell.read_only = True
        cell.numeric_format = numeric_format
def format_population(sheet, money=False, round=False):
    """Rebuild *sheet* optimized for displaying population data.

    Surfaces the row index as a leading "Population" column, optionally
    rounds values, applies the numeric/currency cell formatting, and hides
    the redundant row headers.
    """
    df = to_df(sheet)
    if round:
        df = df.round()
    # expose the index as a visible first column
    df.insert(loc=0, column="Population", value=list(df.index))
    styled = to_sheet(df)
    format_cells(styled, money)
    styled.row_headers = False
    return styled
def display_population(sheet, money=False, round=False):
    """Format a population sheet and render it in the notebook."""
    display(format_population(sheet, money=money, round=round))
import math
import os
from typing import Dict, List, Optional, Tuple
import confuse # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
from .data import Data # type: ignore
from .filtermodel import Filter # type: ignore
from .load_csv import LoadCSV # type: ignore
from .log import Log # type: ignore
from .population import Population # type: ignore
from .util import datetime_to_code, load_dataframe # type: ignore
class PopulationOES(Population):
"""Transforms OES data into a format compatible with the model.
Performs calculations to give us an estimate of population distributions on
a county-wide basis.
Attributes:
oes_df: Dataframe containing OES data
code_df: Dataframe containing conversions between county and MSA code
pop_df: Dataframe containing census population data per county
cty_name: Name of a US county
state_name: Name of a US state
df: The processed, OES data in a dataframe
"""
def __init__(
self,
config: confuse.Configuration,
filt: Filter,
log_root: Optional[Log] = None,
):
"""Initialize.
Read the paths in and create dataframes, generate mappings
"""
super().__init__(config, log_root=log_root)
self.log_root = log_root
log = self.log
log.debug(f"module {__name__=}")
# get location and population from the filter
self.location = filt.location
try:
if self.location["county"] is not None:
self.location["county"] += " County"
except KeyError:
log.debug("invalid location input {self.location=}")
return
self.subpop = filt.subpop
self.codes: list
self.load_data(config, self.location)
    def load_data(self, config, location):
        """Build every dataframe the population model needs.

        Loads the OES, MSA-crosswalk, census, and excel-model files named
        in ``config["Paths"]``, slices OES to the requested location
        (country, state, or county), applies the optional subpopulation
        filter, and publishes the results as Data objects on ``self``.
        Kept separate from ``__init__`` so the intermediate inputs are not
        passed around.

        Args:
            config: confuse configuration; ``config["Paths"]`` names the
                input files.
            location: dict with "county" and "state" keys (either may be
                None).
        """
        # extract the dataframes we need from the input files
        # NOTE(review): when config is None the branches below would hit
        # unbound oes_df/xls_df — presumably config is never None here
        if config is not None:
            source = config["Paths"].get()
            source = LoadCSV(source=source).data
            oes_df = load_dataframe(
                os.path.join(source["Root"], source["OES"])
            )
            code_df = self.format_code(
                load_dataframe(os.path.join(source["Root"], source["CODE"]))
            )
            pop_df = load_dataframe(
                os.path.join(source["Root"], source["POP"])
            )
            xls_df = self.format_map(
                load_dataframe(os.path.join(source["Root"], source["XLS"]))
            )
        # pick the OES slice matching the requested granularity
        if location["county"] is None and location["state"] is None:
            df = self.create_country_df(oes_df)
        elif location["county"] is not None and location["state"] is not None:
            # no-op kept from the original
            location["county"] = location["county"]
            df = self.create_county_df(location, oes_df, code_df, pop_df)
        else:
            df = self.create_state_df(location, oes_df)
        # narrow to the requested subpopulation, if any
        if self.subpop == "healthcare":
            df = self.health_filter(df)
        elif self.subpop == "wa_tier2_opt1":
            df = self.wa_tier2_opt1_filter(df)
        elif self.subpop == "wa_tier2_opt2":
            df = self.wa_tier2_opt2_filter(df)
        # the actual data passed onto the model
        self.pop_detail_df = self.drop_code(df)
        self.population_pP_tr = Data(
            "population_pP_tr",
            config,
            log_root=self.log_root,
            p_index=list(self.pop_detail_df.index),
            P_index=["Size"],
            # NOTE(review): drop_code is called a second time here, setting
            # self.codes twice with the same values — confirm intended
            array=self.drop_code(df).to_numpy(),
        )
        pop_to_burn_df = self.pop_to_burn_rate(df, xls_df)
        self.pop_demand_per_unit_map_pd_um: Data = Data(
            "pop_demand_per_unit_map_pd_um",
            config,
            log_root=self.log_root,
            p_index=list(pop_to_burn_df.index),
            array=pop_to_burn_df.to_numpy(),
        )
        self.set_essential(xls_df, config)
        # exploratory code kept from the original:
        # detail_pd_arr = detail_pd_df["Size"].to_numpy()
        # self.pop_demand_per_unit_map_pd_um: Data = self.pop_to_burn_rate(
        #    df,
        #    map_df
        # )
        # map_labs, map_arr = self.create_map(df, map_df)
        # load into dictionary
        # df_dict = {}
        # df_dict["detail_pd_df"] = detail_pd_df
        # df_dict["detail_pd_arr"] = detail_pd_arr
        # df_dict["map_labs"] = map_labs
        # df_dict["map_arr"] = map_arr
def format_code(self, df: pd.DataFrame) -> pd.DataFrame:
"""Perform dataframe transformations specific to list1_2020.xls.
Args:
df: A dataframe
Returns:
A transformed dataframe to match the format needed for this project
"""
# Specify columns to bypass issues with underlining in original excel
df.columns = [
"CBSA Code",
"MDC Code",
"CSA Code",
"CBSA Title",
"Metropolitan/Micropolitan Statistical Area",
"Metropolitan Division Title",
"CSA Title",
"County Equivalent",
"State Name",
"FIPS State Code",
"FIPS County Code",
"Central/Outlying County",
]
# Select MSA, as this is what OES data is based off of
df = df[
df["Metropolitan/Micropolitan Statistical Area"]
== "Metropolitan Statistical Area"
]
# Drop data we don't need
df = df.drop(
[
"MDC Code",
"CSA Code",
"Metropolitan Division Title",
"Metropolitan/Micropolitan Statistical Area",
"CSA Title",
"FIPS State Code",
"FIPS County Code",
"Central/Outlying County",
],
axis=1,
)
# Reset indices for aesthetic appeal
df = df.reset_index(drop=True)
return df
def format_map(self, df: pd.DataFrame) -> pd.DataFrame:
"""Manually slice the excel model to get protection level mappings.
Args:
df: The excel model loaded into a dataframe
Returns:
The dataframe sliced to give the mappings
"""
# manually redo indexing and select the rows we need
# TODO: need new sheet that isn't precariously sliced like this
DF_COLUMNS = 2528
DF_START = 2529
DF_END = 3303
df.columns = df.iloc[DF_COLUMNS]
df = df.iloc[DF_START:DF_END]
df = df[
["Washington SOT", "SOC", "Type", "Level", "Essential (0 lowest)"]
]
# fix datetime objects and drop empty rows
df["SOC"] = df["SOC"].apply(datetime_to_code)
df = df.dropna(axis="rows").reset_index(drop=True)
return df
    def pop_to_burn_rate(
        self, df: pd.DataFrame, map_df: pd.DataFrame
    ) -> pd.DataFrame:
        """Map each occupation onto a 7-slot protection-level weight vector.

        For every OCC code in *df*, looks up its protection level in
        *map_df*. Integer levels become a one-hot entry; fractional levels
        split 0.5/0.5 between the two neighboring levels. Codes absent
        from the map default to level 5.5 for healthcare (29-/31- codes)
        and 3 otherwise.

        Args:
            df: dataframe whose occ_code column and index (occupation
                titles) drive the mapping.
            map_df: SOC-to-Level mapping (output of :meth:`format_map`).

        Returns:
            DataFrame of weight rows indexed by occupation title.
            (Annotation fixed: this returns a DataFrame, not a
            (list, ndarray) tuple.)
        """
        map_arr = []
        labels = []
        for code in df["occ_code"]:
            arr = np.zeros(7)
            try:
                ind = map_df[map_df["SOC"] == code].index[0]
                level = map_df.iloc[ind]["Level"]
            except IndexError:
                # code missing from the map: healthcare gets a high
                # default level, everything else a middle default
                if code.startswith("29-") or code.startswith("31-"):
                    level = 5.5
                else:
                    level = 3
            # integer levels map to a single one-hot entry
            if type(level) is int:
                arr[level] = 1
            # fractional levels split weight across the two nearest levels
            else:
                arr[math.floor(level)] = 0.5
                arr[math.ceil(level)] = 0.5
            # index label (occupation title) for this code's row
            name = df[df["occ_code"] == code].index.tolist()[0]
            labels.append(name)
            map_arr.append(arr)
        pop_to_level_df = pd.DataFrame(map_arr, index=labels)
        return pop_to_level_df
def find_code(self, location: Dict, code_df: pd.DataFrame) -> int:
"""Finds the MSA code of given county.
Args:
None
Returns:
Integer corresponding to the given county's MSA code
"""
if code_df is None:
raise ValueError(f"{code_df=} should not be None")
return int(
code_df[
(code_df["County Equivalent"] == location["county"])
& (code_df["State Name"] == location["state"])
]["CBSA Code"].iloc[0]
)
def calculate_proportions(
self,
code: int,
location: Dict,
code_df: pd.DataFrame,
pop_df: pd.DataFrame,
) -> float:
"""Calculate county proportion relative to total MSA pop.
Args:
code: MSA code for desired county
Returns:
A float corresponding to the ratio of the county's population in
relation to its MSA code.
"""
if code_df is None:
raise ValueError(f"{code_df=} should not be None")
if pop_df is None:
raise ValueError(f"{code_df=} should not be None")
# List the counties in the same MSA code as cty_name
counties = list(
code_df[code_df["CBSA Code"] == str(code)]["County Equivalent"]
)
# Construct dictionary mapping county names to constituent populations
populations = {}
for county in counties:
pop = int(
pop_df[
(pop_df["CTYNAME"] == county)
& (pop_df["STNAME"] == location["state"])
]["POPESTIMATE2019"]
)
populations[county] = pop
# Calculate total population in MSA code
total_pop = sum(populations.values())
# Divide individual county population by total MSA population
return populations[location["county"]] / total_pop
def load_county(
self,
location: Dict,
oes_df: pd.DataFrame,
code_df: pd.DataFrame,
pop_df: pd.DataFrame,
) -> Tuple[float, pd.DataFrame]:
"""Slice the OES data by county for further processing downstream.
Args:
None
Returns:
proportion: Float corresponding to proportion of residents from
MSA code living in given county
df: Sliced OES dataframe
"""
# find county MSA CODE
code = self.find_code(location, code_df)
# calculate proportion of MSA code's residents living in county
proportion = self.calculate_proportions(
code, location, code_df, pop_df
)
# initialize dataframe as slice of OES data
df = oes_df[oes_df["area"] == code][
["occ_code", "occ_title", "o_group", "tot_emp"]
]
# replace placeholders with 0
df = df.replace(to_replace="**", value=0)
return proportion, df
def load_state(self, location: Dict, oes_df: pd.DataFrame) -> pd.DataFrame:
"""Slice the OES data by state for further processing downstream.
Args:
None
Returns:
df: Sliced OES dataframe
"""
# slice OES dataframe by state
col_list = ["occ_code", "occ_title", "o_group", "tot_emp"]
df = oes_df[(oes_df["area_title"] == location["state"])][col_list]
# replace placeholders with 0
df = df.replace(to_replace="**", value=0)
return df
def load_country(self, oes_df: pd.DataFrame) -> pd.DataFrame:
"""Get the OES data for the whole country.
The default setting for OES population
"""
# slice OES dataframe by the whole county
col_list = ["occ_code", "occ_title", "o_group", "tot_emp", "naics"]
df = oes_df[
(oes_df["area_title"] == "U.S.") & (oes_df["naics"] == "000000")
][col_list]
df = df.drop(["naics"], axis=1)
# replace placeholders with 0
df = df.replace(to_replace="**", value=0)
return df
def fill_uncounted(
self, major: pd.DataFrame, detailed: pd.DataFrame
) -> pd.DataFrame:
"""Create special categories for uncounted employees.
Args:
major: Dataframe containing totals for major OCC categories
detailed: Dataframe containing totals for detailed OCC categories
Returns:
The detailed dataframe with extra categories to account for
uncounted workers
"""
code_list = list(set(major["occ_code"]))
for code in code_list:
pat = code[0:3]
filt = detailed[detailed["occ_code"].str.startswith(pat)]
# Calculate number of employees unaccounted for within the major
# OCC code
total = int(major[major["occ_code"] == code]["tot_emp"])
det_total = np.sum(filt["tot_emp"])
delta = total - det_total
if delta > 0:
# create dataframe row and append to detailed dataframe
name = list(major[major["occ_code"] == code]["occ_title"])[0]
add_lst = [
[pat + "XXXX", "Uncounted " + name, "detailed", delta]
]
add_df = pd.DataFrame(add_lst, columns=list(major.columns))
detailed = detailed.append(add_df, ignore_index=True)
return detailed
def format_output(self, df: pd.DataFrame) -> pd.DataFrame:
"""Format dataframe to fit the model by dropping some columns.
Args:
df: The dataframe we want to format
Returns:
The formatted dataframe
"""
df = df.drop(df[df["tot_emp"] == 0].index)
df = df.drop(["o_group"], axis=1)
df = df.reset_index(drop=True)
return df
def drop_code(self, df: pd.DataFrame) -> pd.DataFrame:
"""Drop the OCC code from a dataframe.
So that it has the right format for the model.
"""
col_labs = ["Size"]
self.codes = list(df["occ_code"])
df = df.drop(["occ_code"], axis=1)
df.columns = col_labs
return df
def create_county_df(
self,
location: Dict,
oes_df: pd.DataFrame,
code_df: pd.DataFrame,
pop_df: pd.DataFrame,
) -> pd.DataFrame:
"""Generate dataframe containing processed OES data by county.
Args:
None
Returns:
The processed dataframe
"""
# Load in sliced dataframe
proportion, df = self.load_county(location, oes_df, code_df, pop_df)
# Split into 'major' and 'detailed' OCC categories
major = df[df["o_group"] == "major"].copy()
detailed = df[df["o_group"] == "detailed"].copy()
# Some detailed categories don't have information availble - remove
# these and place into "Uncounted" category
detailed = self.fill_uncounted(major, detailed)
# Adjust 'tot_emp' columns by MSA code proportion
detailed["tot_emp"] = detailed["tot_emp"].apply(
lambda x: int(x * proportion)
)
# Format to fit model
detailed = self.format_output(detailed)
detailed.set_index("occ_title", drop=True, inplace=True)
return detailed
def create_state_df(
self, location: Dict, oes_df: pd.DataFrame
) -> pd.DataFrame:
"""Generate dataframe containing processed OES data by state.
Args:
None
Returns:
The processed dataframe
"""
# Load in sliced dataframe
df = self.load_state(location, oes_df)
major = df[df["o_group"] == "major"].copy()
detailed = df[df["o_group"] == "detailed"].copy()
# Some detailed categories don't have information available - remove
# these and place into "Uncounted" category
detailed = self.fill_uncounted(major, detailed)
# Format to fit model
detailed = self.format_output(detailed)
detailed.set_index("occ_title", drop=True, inplace=True)
return detailed
def create_country_df(self, oes_df: pd.DataFrame) -> pd.DataFrame:
"""Generate dataframe containing processed OES data for US.
Args:
oes_df: Dataframe containing OES data
Returns:
The processed dataframe
"""
df = self.load_country(oes_df)
major = df[df["o_group"] == "major"].copy()
detailed = df[df["o_group"] == "detailed"].copy()
detailed = self.fill_uncounted(major, detailed)
detailed = self.format_output(detailed)
detailed.set_index("occ_title", drop=True, inplace=True)
return detailed
def health_filter(self, df: pd.DataFrame) -> pd.DataFrame:
"""Return a detailed breakdown of healthcare workers with OCC codes.
Args:
None
Returns:
Dataframe object with the detailed breakdown
"""
# 29-XXXX and 31-XXXX are the healthcare worker codes
filt = df[
(df["occ_code"].str.startswith("29-"))
| (df["occ_code"].str.startswith("31-"))
]
return filt
def wa_tier2_opt1_filter(self, df: pd.DataFrame) -> pd.DataFrame:
"""Return a detailed breakdown of Washington tier 2 workers.
Args:
None
Returns:
Dataframe object with the detailed breakdown
"""
filt = df[
(df["occ_code"].str.startswith("33-"))
| (df["occ_code"].str.startswith("29-"))
| (df["occ_code"].str.startswith("31-"))
]
return filt
def wa_tier2_opt2_filter(self, df: pd.DataFrame) -> pd.DataFrame:
"""Return a detailed breakdown of Washington tier 2 workers.
Args:
None
Returns:
Dataframe object with the detailed breakdown
"""
occ_list = [
"29-1292",
"29-2040",
"29-1215",
"29-1126",
"29-1223",
"29-1181",
"29-1221",
"31-1120",
"31-1131",
"39-4031",
"31-1132",
"39-4011",
"31-1133",
"33-2011",
"31-9091",
"33-3012",
"33-3021",
"33-9093",
"33-3041",
"33-3051",
"33-3052",
"29-2052",
]
filt = df[df["occ_code"].isin(occ_list)]
return filt
    def set_essential(self, df: pd.DataFrame, config) -> None:
        """Get population essential levels from the excel model.

        Builds a p x 2 one-hot array mapping each cached OCC code
        (``self.codes``) to an "essential" (level >= 5) or
        "non-essential" bucket and stores it as
        ``self.pop_to_popsum1_per_unit_map_pp1_us``.

        Manually slice the dataframe
        """
        # df.columns = df.iloc[2528]
        # df = df.iloc[2529:3303]
        # df = df[["SOC", "Essential (0 lowest)"]]
        pop_level: List = []
        # SOC codes read from Excel can arrive as datetimes; normalize.
        df["SOC"] = df["SOC"].apply(datetime_to_code)
        df.reset_index(drop=True, inplace=True)
        for code in list(self.codes):
            # One-hot pair: arr[0] = essential, arr[1] = non-essential.
            arr = np.zeros(2)
            try:
                ind = df[df["SOC"] == code].index[0]
            except IndexError:
                ind = -1
            # NOTE(review): `ind > 0` treats a legitimate match at row 0
            # as missing; this probably should be `ind >= 0` -- confirm.
            if ind > 0:
                level = df.iloc[ind]["Essential (0 lowest)"]
            else:
                # Unknown code: fall back to a random level in [0, 6).
                # NOTE(review): this makes the mapping non-deterministic.
                level = np.random.randint(0, high=6)
            if level >= 5:
                arr[0] = 1
            else:
                arr[1] = 1
            pop_level.append(arr)
        self.pop_to_popsum1_per_unit_map_pp1_us = Data(
            "pop_to_popsum1_per_unit_map_pp1_us",
            config,
            log_root=self.log_root,
            p_index=list(self.pop_detail_df.index),
            array=np.array(pop_level),
        ) | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/population_oes.py | 0.798894 | 0.363336 | population_oes.py | pypi |
from typing import Callable, Dict
import confuse # type: ignore
import yaml
from .base import Base # type: ignore
from .log import Log # type: ignore
class Output(Base):
    """Write model state out to a YAML config file and a CSV file.

    A callback (``get_element``) yields each model element in turn;
    output cannot import the model directly because the two modules
    would then mutually import each other.
    http://effbot.org/pyfaq/how-can-i-have-modules-that-mutually-import-each-other.htm
    """

    def __init__(
        self,
        get_element: Callable,
        config: confuse.Configuration,
        log_root: Log = None,
        out: str = None,
        csv: str = None,
    ):
        """Set up logging and immediately emit the configured outputs.

        Generate the config files
        """
        super().__init__(log_root=log_root)
        self.log.debug(f"In {__name__}")
        # Output destinations; either may be None (meaning "skip").
        self.out = out
        self.csv = csv
        self.config = config
        # Writing happens eagerly, at construction time.
        self.generate_config(get_element)
        self.write_csv()
def generate_config(self, get_element: Callable):
"""Generate a new config yaml."""
log = self.log
new_config: Dict = {}
for section in ["Parameter", "Dimension", "Paths", "Model"]:
new_config[section] = self.config[section].get()
log.debug(f"Wrote all config data into {new_config=}")
# get the in memory data read from csv's and elsewhere or computed
# note you cannot pass the model over so you need to do this as a call
# back but the code looks like
for key, value in get_element():
# we have more in memory objects than in the Model
# so check for existance. For instance filtering is in memory only
if key in new_config["Model"]:
new_config["Model"][key]["array"] = value.array
self.write_config(new_config)
def write_config(self, config: Dict):
"""Writes config dict to yaml file."""
if self.out is not None:
with open(self.out, "w") as yamlfile:
yaml.dump(config, yamlfile)
    def write_csv(self):
        """Writes to a CSV file.

        Prepends the population "Size" column to the total demand frame
        and writes the result to ``self.csv`` (skipped when None).
        """
        if self.csv is not None:
            # NOTE(review): self.demand and self.pop are never assigned
            # anywhere in this class -- presumably attached by the
            # caller before output; verify, otherwise this raises
            # AttributeError.
            df = self.demand.total_demand_pn_df.copy()
            # insert population into the dataframe
            pop = list(self.pop.detail_pd_df["Size"])
            df.insert(loc=0, column="Size", value=pop)
            df.to_csv(self.csv) | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/output.py | 0.788217 | 0.213131 | output.py | pypi |
import logging
from typing import Dict, Optional, Tuple
import pandas as pd # type:ignore
from .log import Log # type: ignore
class BaseLog:
    """Logging is at the very bottom of the class hierarchy."""

    def __init__(self, log_root: Optional[Log] = None):
        """Attach a logger, from the root Log if one is provided."""
        # Keep a handle on the root so subclasses can pass it along.
        self.log_root = log_root
        if log_root is None:
            # No root log available: fall back to a module logger.
            self.log = logging.getLogger(__name__)
        else:
            self.log = log_root.log_class(self)
        self.log.debug(f"{self=}")
class Base(BaseLog):
    """Base for all model classes.

    Carries per-instance description strings and (via __iter__/__next__)
    makes every model class iterable over its Pandas DataFrames.
    """

    # Do not put variables here unless you want them shared across all
    # instances; see https://docs.python.org/3/tutorial/classes.html
    # https://stackoverflow.com/questions/9056957/correct-way-to-define-class-variables-in-python
    def __init__(self, log_root: Log = None):
        """Set base variables.

        Mainly the descriptions
        """
        super().__init__(log_root=log_root)
        # Variable descriptions, keyed by the short attribute name.
        self.description: Dict = {}
    def set_description(self, name: str, description: str):
        """Set the variable description.

        The descriptions are carried in each class so they are
        self-documenting; this may move to a central store later.
        Strips a trailing "=" left over from an f-string capture and
        keeps only the last member name after any dots.
        """
        # we can't use a higher level logger
        log: logging.Logger = logging.getLogger(__name__)
        # https://stackoverflow.com/questions/18425225/getting-the-name-of-a-variable-as-a-string/58451182#58451182
        # Using Python 3.8 f strings
        # you must use double quotes inside single quotes for strings
        # NOTE(review): this logs the *builtin* `object`, not a caller
        # value -- it looks like leftover debugging.
        log.debug(f"{object=}")
        # this doesn't work, we need the real object's name so has to happen in
        # caller
        # name = f'{object=}'.split('=')[0]
        # log.debug(f'set self.description[{name}]')
        # https://stackoverflow.com/questions/521502/how-to-get-the-concrete-class-name-as-a-string
        # pdb.set_trace()
        class_name = self.__class__.__name__
        # https://stackoverflow.com/questions/599953/how-to-remove-the-left-part-of-a-string
        # clean up the name so you only get the basename after the period
        # https://www.tutorialspoint.com/How-to-get-the-last-element-of-a-list-in-Python
        # e.g. "self.foo=" -> "foo"
        name = name.split("=")[0].split(".")[-1]
        model_name = class_name + "." + name
        log.debug(f"{model_name=} {name=}")
        # log.debug(f'set model.description[{model_name}]')
        self.description[name] = description
        # method chaining
        return self
def __iter__(self):
"""Iterate over all Pandas DataFrames.
Uses a list of all frames
"""
self.df_list = [
k for k, v in vars(self).items() if isinstance(v, pd.DataFrame)
]
self.df_len = len(self.df_list)
self.df_index = 0
return self
    def __next__(self) -> Tuple[str, pd.DataFrame]:
        """Next Pandas DataFrame.

        Returns the (attribute name, DataFrame) pair at the current
        cursor and advances it; raises StopIteration when exhausted.
        """
        if self.df_index >= self.df_len:
            raise StopIteration
        key = self.df_list[self.df_index]
        value = vars(self)[key]
        self.df_index += 1
        return key, value | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/base.py | 0.860486 | 0.262647 | base.py | pypi |
from typing import Optional
# Insert the classes of data we support here
import confuse # type: ignore
# Note that pip install data-science-types caused errors
from .base import Base # type: ignore
from .data import Data # type: ignore
from .log import Log # type: ignore
class Population(Base):
    """Population objects are created here.

    This base class carries a default (Bharat) model for testing; you
    should override it with a child class.

    Population statistics and model for population.  Initially this
    contains a population of p x 1; later it will be p x d where d are
    the detail columns (for instance the number of covid patients, or
    the number of trips/visits/runs for a given population).

    The second matrix describes how to map population p to l demand
    levels, giving a p x l.  Long term this becomes d x p x l so each
    detail d of the population can have a different run rate.

    How are resources consumed by different levels in a population?
    This is the key first chart in the original model: for a set of l
    protection levels and each of n resources, it provides the burn
    rate, so it is an l x n dataframe.  In this first version burn
    rates are per capita (per person in a given level).

    In a later version we will allow different "burn rates" by
    population attributes, making this a 3-dimensional model.  For
    convenience the Frame object retains objects in simple dataframe
    form since that is easy to extract.  For multidimensional indices
    we keep both the n-dimensional array (tensor) and a method to
    convert it to a MultiIndex for use by Pandas.

    We also create a friendly name and long description as document
    strings; eventually this becomes a data-description file we read
    in, but for now it is a dictionary.
    """

    def __init__(self, config: confuse.Configuration, log_root: Log = None):
        """Initialize all variables.

        All initialization here and uses type to determine which method to call
        The default is PopulationDict which reads from the model.data
        """
        # https://stackoverflow.com/questions/1385759/should-init-call-the-parent-classs-init/7059529
        # to pick up the description
        super().__init__(log_root=log_root)
        log = self.log
        log.debug("In %s", __name__)
        self.config = config
        # these need to be filled out by the subclasses
        # define them here for type checking purposes
        # And to have them instantiated for subclasses
        self.population_pP_tr: Optional[Data] = None
        self.pop_demand_per_unit_map_pd_um: Optional[Data] = None
        self.pop_to_popsum1_per_unit_map_pp1_us: Optional[Data] = None | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/population.py | 0.871993 | 0.730843 | population.py | pypi |
from __future__ import annotations
# For slices of parameters
from enum import Enum
from typing import List
import confuse # type: ignore
import numpy as np # type: ignore
from .base import Base # type: ignore
from .data import Data # type: ignore
from .log import Log # type: ignore
# https://docs.python.org/3/library/enum.html
# These are the slices used
class InvParam(Enum):
    """List positions in Inventory Parameter List.

    Row index of each parameter in the inventory parameter array
    (``inv_by_popsum1_param_iIp1n_tp``).
    """

    INIT = 0  # initial inventory level
    EOQ = 1  # economic order quantity
    MIN = 2  # minimum (safety) stock
class Inventory(Base):
    """Inventory - Manages all the inventorys that are used in the model."""

    def __init__(
        self,
        config: confuse.Configuration,
        log_root: Log = None,
    ):
        """Initialize the Inventorys.

        Reads all inventory arrays from the configuration; array names
        encode their axes (r resources x p1 population summary x n).
        """
        # initialize logging and description
        super().__init__(log_root=log_root)
        log = self.log
        self.config = config
        log.debug(f"in {__name__}")
        # Current on-hand inventory totals.
        self.inv_by_popsum1_total_rp1n_tc = Data(
            "inv_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # Parameter block (init/EOQ/min rows; see InvParam).
        self.inv_by_popsum1_param_iIp1n_tp = Data(
            "inv_by_popsum1_param_iIp1n_tp", config, log_root=log_root
        )
        log.debug(f"{self.inv_by_popsum1_param_iIp1n_tp.df=}")
        # TODO: This should be taken from the param file
        self.inv_init_by_popsum1_total_rp1n_tc = Data(
            "inv_init_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        log.debug(f"set inv to {self.inv_init_by_popsum1_total_rp1n_tc=}")
        # Economic order quantity per resource.
        self.inv_eoq_by_popsum1_total_rp1n_tc = Data(
            "inv_eoq_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # Minimum (safety) stock level in absolute units.
        self.inv_min_by_popsum1_total_rp1n_tc = Data(
            "inv_min_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # Helpers to handle period calculations
        self.inv_min_by_popsum1_in_periods_rp1n_pc = Data(
            "inv_min_by_popsum1_in_periods_rp1n_pc", config, log_root=log_root
        )
        self.inv_average_orders_by_popsum1_per_period_rp1n_uf = Data(
            "inv_average_orders_by_popsum1_per_period_rp1n_uf",
            config,
            log_root=log_root,
        )
        self.inv_order_by_popsum1_total_rp1n_tc = Data(
            "inv_order_by_popsum1_total_rp1n_tc", config, log_root=log_root
        )
        # can only set minimum once inv_min exists and order too;
        # set_min also triggers the first supply order.
        self.set_min(self.inv_init_by_popsum1_total_rp1n_tc)
    def set_average_orders_per_period(
        self, inv_average_orders_by_popsum1_per_period_rp1n_uf: Data
    ):
        """Set Average Inventory Used Every Period.

        This could just be a simple set but leave here for clarity.
        Used later to convert period-based minimums into absolute
        stock levels (see set_min_in_periods_array).
        """
        self.inv_average_orders_by_popsum1_per_period_rp1n_uf = (
            inv_average_orders_by_popsum1_per_period_rp1n_uf
        )
def set_min_in_periods(
self,
min_periods_r_pc: List,
) -> Inventory:
"""Sets the Minimum Inventory as measured in Average Days Shipments.
A Helper function that fill out an entire array and then passes it
down
"""
log = self.log
log.debug(f"{min_periods_r_pc=}")
self.inv_min_by_popsum1_in_periods_rp1n_pc.array = np.einsum(
"r,rxn->rxn",
min_periods_r_pc,
np.ones_like(self.inv_min_by_popsum1_in_periods_rp1n_pc.array),
)
self.set_min_in_periods_array(
self.inv_min_by_popsum1_in_periods_rp1n_pc
)
return self
    def set_min_in_periods_array(
        self,
        min_periods_rp1n_tc: Data,
    ) -> Inventory:
        """Set minimum inventory from a full array measured in periods.

        Converts periods into absolute stock by multiplying (element
        wise) with the average orders per period, then delegates to
        ``set_min``.
        """
        log = self.log
        self.inv_min_by_popsum1_in_periods_rp1n_pc.array = (
            min_periods_rp1n_tc.array
        )
        log.debug(f"{self.inv_min_by_popsum1_in_periods_rp1n_pc.df=} ")
        # https://numpy.org/doc/stable/reference/generated/numpy.empty_like.html
        # note we need r=1 for this to work so we insert an empty dimension
        # https://numpy.org/doc/stable/reference/generated/numpy.expand_dims.html
        # needed this before we started calling with full range
        # self.inv_min_by_popsum1_per_period_rp1n_uc.array = np.expand_dims(
        #     self.inv_min_by_popsum1_per_period_rp1n_uc.array, axis=0
        # )
        # This einsum is an elementwise multiply:
        # periods * (average orders per period) -> absolute minimum.
        self.inv_min_by_popsum1_total_rp1n_tc.array = np.einsum(
            "rxn,rxn->rxn",
            min_periods_rp1n_tc.array,
            self.inv_average_orders_by_popsum1_per_period_rp1n_uf.array,
        )
        self.set_min(self.inv_min_by_popsum1_total_rp1n_tc)
        return self
def set_min(self, min_by_popsum1_total_rp1n_tc: Data) -> Inventory:
"""Set the minimum inventory in periods_r.
This sets the minimum inventory and then forces an order in case we are
below the minimum
"""
log = self.log
# https://stackoverflow.com/questions/53375161/use-numpy-array-to-replace-pandas-dataframe-values
self.inv_by_popsum1_total_rp1n_tc.array = (
min_by_popsum1_total_rp1n_tc.array
)
log.debug(f"{self.inv_by_popsum1_total_rp1n_tc.df=}")
self.supply_order()
return self
def supply_order(self) -> Inventory:
"""Order from supplier.
Order up to the minimum inventory
"""
# hack here because we only do ranges for min inventory
self.inv_order_by_popsum1_total_rp1n_tc.array = (
self.inv_min_by_popsum1_total_rp1n_tc.array
- self.inv_by_popsum1_total_rp1n_tc.array
)
# negative means we have inventory above safety levels
# so get rid of those
# https://www.w3inventory.com/python-exercises/numpy/python-numpy-exercise-90.php
self.inv_order_by_popsum1_total_rp1n_tc.array[
self.inv_order_by_popsum1_total_rp1n_tc.array < 0
] = 0
# now gross up the order to the economic order quantity
self.round_up_to_eoq(self.inv_order_by_popsum1_total_rp1n_tc)
self.log.debug(f"{self.inv_order_by_popsum1_total_rp1n_tc.df=}")
# now that we have an order rounded up and ready, let's get supply
self.fulfill(self.inv_order_by_popsum1_total_rp1n_tc)
return self
# https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python
def round_up_to_eoq(self, order_by_popsum1_total_rp1n_tc: Data) -> Data:
"""Round order up the economic order quantity.
Each order needs to get rounded up to an economic quantity
"""
if np.any(self.inv_eoq_by_popsum1_total_rp1n_tc.array <= 0):
raise ValueError(
f"EOQ not positive {self.inv_eoq_by_popsum1_total_rp1n_tc.df=}"
)
if np.any(order_by_popsum1_total_rp1n_tc.array < 0):
raise ValueError(
f"Negative order in {order_by_popsum1_total_rp1n_tc.df=}"
)
# So take the order and then get the distance to the eoc
# by using modulo
# https://stackoverflow.com/questions/50767452/check-if-dataframe-has-a-zero-element
# https://numpy.org/doc/stable/reference/generated/numpy.any.html
# https://softwareengineering.stackexchange.com/questions/225956/python-assert-vs-if-return
# do not use asserts they are stripped with optimization, raise errors
return (
order_by_popsum1_total_rp1n_tc.array
+ (
self.inv_eoq_by_popsum1_total_rp1n_tc.array
- order_by_popsum1_total_rp1n_tc.array
)
% self.inv_eoq_by_popsum1_total_rp1n_tc.array
)
def fulfill(self, order_by_popsum1_total_rp1n_tc: Data):
"""Fulfill an order form supplier.
This is a stub in that all orders are immediatley fulfilled
"""
log = self.log
log.debug(f"fulfill {order_by_popsum1_total_rp1n_tc=}")
self.inv_by_popsum1_total_rp1n_tc.array += (
order_by_popsum1_total_rp1n_tc.array
)
log.debug(f"{self.inv_by_popsum1_total_rp1n_tc.df=}")
    def order(self, order_by_popsum1_total_rp1n_tc: Data) -> Data:
        """Order by Customer from Inventory.

        Ships as much of the request as current stock allows, then
        restocks from the supplier.

        returns: whats available to ship
        """
        # Ship the elementwise minimum of what was asked for and what
        # is on hand; a simple min() would not work elementwise.
        # https://numpy.org/doc/stable/reference/generated/numpy.minimum.html
        self.inv_order_by_popsum1_total_rp1n_tc.array = np.minimum(
            order_by_popsum1_total_rp1n_tc.array,
            self.inv_by_popsum1_total_rp1n_tc.array,
        )
        # ship it! (in-place subtract keeps array aliases consistent)
        self.inv_by_popsum1_total_rp1n_tc.array -= (
            self.inv_order_by_popsum1_total_rp1n_tc.array
        )
        # now restock up to the minimum level
        self.supply_order()
        return self.inv_order_by_popsum1_total_rp1n_tc | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/inventory.py | 0.7586 | 0.272272 | inventory.py | pypi |
import os
from typing import Dict, List, Optional
import pandas as pd # type:ignore
from .load import Load # type: ignore
from .log import Log # type: ignore
class LoadCSV(Load):
    """Converts Excel and CSV files into dataframe objects.

    If you give it files with a .xlsx, .xls, or .csv extension, it will
    read their data into a dataframe, and then save the dataframe as an
    h5 file with extension .h5. If you feed this class an h5 file, it
    will simply pass through. This minimizes how often the Excel/CSV
    data must be processed - for larger files, that can be lengthy.

    Attributes:
        excel_ext: list of extensions attached to excel files
        csv_ext: list of extensions attached to csv files
        data: dictionary mapping source keys to h5 file names
    """

    def __init__(
        self,
        source: Dict = None,
        log_root: Optional[Log] = None,
        excel_ext: List[str] = None,
        csv_ext: List[str] = None,
    ):
        """Initialize the Loader to read files.

        Reads every file referenced by ``source`` (except the "Root"
        key, which names the directory) and caches each one as an .h5
        file; ``self.data`` maps the source keys to those .h5 names.

        Raises:
            ValueError: if ``source`` is None, its "Root" entry is
                None, or a file has an unsupported extension.
        """
        # logging setup
        super().__init__(log_root=log_root)
        log = self.log
        log.debug(f"{self.log=} {log=}")
        log.debug(f"module {__name__=}")
        # BUG FIX: the extension lists were mutable default arguments,
        # shared across every call; use None sentinels instead.
        self.excel_ext = [".xlsx", ".xls"] if excel_ext is None else excel_ext
        self.csv_ext = [".csv"] if csv_ext is None else csv_ext
        if source is None:
            raise ValueError(f"{source=} should not be None")
        try:
            if source["Root"] is None:
                raise ValueError(f"need root directory in {source=}")
        except KeyError:
            # Missing "Root" key: log and leave the object unloaded.
            log.debug(f"{source=} invalid config")
            return None
        # read all files in the given root directory
        files = os.listdir(source["Root"])
        rootdir = source["Root"]
        # NOTE(review): this aliases (and below mutates) the caller's
        # dict rather than copying it -- confirm that is intentional.
        self.data: Dict = source
        for fname in source:
            # skip root key
            if not fname == "Root":
                path = source[fname]
                log.debug(f"{path=}")
                # split paths into name + extension
                base, ext = os.path.splitext(path)
                fullbase = os.path.join(rootdir, base)
                try:
                    # reuse a previously generated h5 file if present
                    if base + ".h5" in files:
                        log.debug(f"preexisting h5 found for {base=}")
                        self.data[fname] = base + ".h5"
                    else:
                        log.debug(f"generating h5 file for {base=}")
                        # excel to dataframe
                        if ext in self.excel_ext:
                            log.debug(f"loading {ext=} file")
                            df = pd.read_excel(fullbase + ext)
                        # csv to dataframe
                        elif ext in self.csv_ext:
                            log.debug(f"loading {ext=} file")
                            df = pd.read_csv(fullbase + ext)
                        else:
                            raise ValueError(f"{fname=} extension invalid")
                        # store dataframe and overwrite dictionary input
                        self.store_dataframe(fullbase, df)
                        self.data[fname] = base + ".h5"
                # handle alternate utf encodings (csv files only)
                except UnicodeDecodeError:
                    log.debug(f"loading {ext=} file with ISO-8859-1 encoding")
                    df = pd.read_csv(fullbase + ext, encoding="ISO-8859-1")
                    self.store_dataframe(fullbase, df)
                    self.data[fname] = base + ".h5"
    def store_dataframe(self, name: str, df: pd.DataFrame) -> None:
        """Serializes a dataframe in h5 format.

        Args:
            name: base path of the file to save (".h5" is appended)
            df: the dataframe to serialize

        Returns:
            None
        """
        log = self.log
        name = name + ".h5"
        log.debug(f"{name=}")
        # Overwrite any existing file; the frame is stored under key "df".
        df.to_hdf(name, key="df", mode="w")
        return None | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/load_csv.py | 0.669529 | 0.221582 | load_csv.py | pypi |
from __future__ import annotations
from typing import Generator, List, Optional, Tuple
from .base import Base # type: ignore
from .demand import Demand # type: ignore
from .demand_dict import DemandDict # type: ignore
from .epi import Epi # type: ignore
from .epi_dict import EpiDict # type: ignore
from .epi_table import EpiTable # type: ignore
from .filtermodel import Filter # type: ignore
from .financial import Financial # type: ignore
from .financial_dict import FinancialDict # type: ignore
from .financial_table import FinancialTable # type: ignore
from .inventory import Inventory # type: ignore
from .inventory_dict import InventoryDict # type: ignore
from .log import Log # type: ignore
from .mobility import Mobility # type: ignore
from .mobility_dict import MobilityDict # type: ignore
from .mobility_table import MobilityTable # type: ignore
from .organization import Organization # type: ignore
from .organization_dict import OrganizationDict # type: ignore
from .output import Output # type: ignore
from .population import Population # type: ignore
from .population_dict import PopulationDict # type: ignore
from .population_oes import PopulationOES # type: ignore
from .population_wa import PopulationWA # type: ignore
from .resource_dict import ResourceDict # type: ignore
from .resourcemodel import Resource # type: ignore
class Model(Base):
    """Main model for planning.

    The Model is the single global data structure for the project: it
    holds pointers to each major model element.  Each element is built
    by a ``set_*`` method, and the methods chain (each returns
    ``self``) so a whole model can be assembled in one statement.

    The major elements fall into three groups:

    - Real resources, like Populations or Organizations.
    - Transforms, which compute mappings (e.g. Population onto
      Resources).
    - Actions, which affect real objects, like Demand.

    Class layering:

    - LogBase: logging only; every class that logs uses it as a base.
    - Base: adds descriptions; used to traverse the Model when
      printing or interrogating it.
    - Resource: base for every new way to read or manage resources.
    """

    # https://satran.in/b/python--dangerous-default-value-as-argument
    # do not use mutable default arguments; they are remembered across
    # calls
    # https://docs.python.org/3/library/typing.html
    def __init__(self, name, log_root: Optional[Log] = None):
        """Initialize the model."""
        # https://stackoverflow.com/questions/1385759/should-init-call-the-parent-classs-init/7059529
        super().__init__(log_root=log_root)
        log = self.log
        log.debug(f"{__name__=}")
        self.name: str = name
        if not log.hasHandlers():
            print(f"{log=} has no handlers")
        log.debug(f"{self.name=}")
def set_configure(self, config) -> Model:
"""Configure the Model.
Uses Loaded as a dictionary and puts it into model variables
"""
log = self.log
self.config = config
log.debug(f"{self.config=}")
return self
# TODO: This should be a generated set of methods as they are all identical
def set_population(self, type: str = None) -> Model:
"""Create population class for model.
Population created here
"""
# the old method
# self.population = Population(
# self.data, log_root=self.log_root, type=type
# )
# the super class population uses type to return the exact model
# filter is by happens after this
self.population: Population
self.filter: Filter
if type == "oes":
self.population = PopulationOES(
self.config,
self.filter,
log_root=self.log_root,
)
elif type == "wa":
self.population = PopulationWA(
self.config, self.filter, log_root=self.log_root
)
elif type == "dict":
# change this to the the naming of columns
self.population = PopulationDict(
self.config,
log_root=self.log_root,
)
else:
raise ValueError(f"{type=} not implemented")
return self
def set_organization(self, type: str = None) -> Model:
"""Set organization."""
self.organization: Organization
if type == "dict":
self.organization = OrganizationDict(
self.config, log_root=self.log_root
)
return self
def set_resource(self, type: str = None) -> Model:
"""Create resource class.
Resource
"""
self.resource: Resource
if type == "dict":
self.resource = ResourceDict(self.config, log_root=self.log_root)
return self
def set_inventory(self, type: str = None) -> Model:
"""Create Inventory management for a specific warehouse."""
self.inventory: Inventory
if type == "dict":
self.inventory = InventoryDict(self.config, log_root=self.log_root)
return self
def set_demand(self, type: str = None) -> Model:
"""Set demand.
Demand by population levels l
"""
log = self.log
self.demand: Demand
if type == "mitre":
log.debug("Use Mitre demand")
raise ValueError("{type=} not implemented")
elif type == "jhu":
log.debug("Use JHU burn rate model")
raise ValueError("{type=} not implemented")
else:
log.debug("Use default yaml dictionary data")
self.demand = DemandDict(
self.config,
res=self.resource,
pop=self.population,
log_root=self.log_root,
type=type,
)
return self
def set_filter(
self, county: str = None, state: str = None, subpop: str = None
) -> Model:
"""Filter the model.
Shrink the model to relevant population, resource
"""
self.filter = Filter(
log_root=self.log_root,
county=county,
state=state,
subpop=subpop,
)
return self
def set_financial(self, type: str = None) -> Model:
"""Create Financial model.
Financial creation
"""
log = self.log
self.financial: Financial
if type == "dict":
self.financial = FinancialDict(
self.config, log_root=self.log_root, type=type
)
elif type == "table":
self.financial = FinancialTable(
self.config, log_root=self.log_root, type=type
)
else:
log.error(f"Financial Model {type=} not implemented")
return self
def set_epi(self, type: str = None) -> Model:
"""Create Epi model.
Epi create
"""
log = self.log
self.epi: Epi
if type == "dict":
self.epi = EpiDict(self.config, log_root=self.log_root, type=type)
elif type in [
"ihme",
"delphi",
"icl",
"lanl",
"sikjalpha",
"yyg",
"chensemble",
]:
self.epi = EpiTable(self.config, log_root=self.log_root, type=type)
else:
log.error(f"Epi Model {type=} not implemented")
return self
def set_mobility(self, type: str = None) -> Model:
"""Create Behavior model.
Behavior create
"""
log = self.log
self.mobility: Mobility
if type == "dict":
self.mobility = MobilityDict(
self.config, log_root=self.log_root, type=type
)
elif type == "table":
self.mobility = MobilityTable(
self.config, log_root=self.log_root, type=type
)
else:
log.error("Behavior not implemented")
return self
# https://docs.python.org/3/library/typing.html#typing.Generator
# returns yield type, send type return type which are null
def walk(self) -> Generator[Tuple[str, Base], None, None]:
"""Walk through all Base objects in Model.
This is needed for things like Output which are called by Model
and you cannot mutually import it. Use a generator instead. Because
python cannot interpret this correctly.
"""
log = self.log
for name, value in self:
log.debug(f"{name=} {value=}")
yield name, value
def set_output(self, out: str = None, csv: str = None) -> Model:
"""Generate output."""
self.output = Output(
self.walk,
config=self.config,
log_root=self.log_root,
out=out,
csv=csv,
)
return self
# https://stackoverflow.com/questions/37835179/how-can-i-specify-the-function-type-in-my-type-hints
# https://www.datacamp.com/community/tutorials/python-iterator-tutorial
# https://towardsdatascience.com/how-to-loop-through-your-own-objects-in-python-1609c81e11ff
# So we want the iterable to be the Base Class
# The iterator is Model which can return all the Base classes
# https://thispointer.com/python-how-to-make-a-class-iterable-create-iterator-class-for-it/
def __iter__(self) -> Model:
"""Iterate through the model getting only Base objects."""
log = self.log
self.base_list: List = [
k for k, v in vars(self).items() if isinstance(v, Base)
]
log.debug(f"{self.base_list=}")
self.base_len: int = len(self.base_list)
self.base_index: int = 0
return self
def __next__(self) -> Tuple[str, Base]:
"""Next Base."""
log = self.log
if self.base_index >= self.base_len:
raise StopIteration
log.debug(f"{self.base_index=}")
key = self.base_list[self.base_index]
value = vars(self)[key]
log.debug(f"{key=} {value=}")
self.base_index += 1
return key, value
    # Use the decorator pattern that Keras and other use with chaining
    def set_logger(self, name: str = __name__) -> Model:
        """Set Log.

        Creates the root Log object and caches its logger on the model
        so every subsequent set_* call can log through it.
        """
        self.log_root = Log(name)
        self.log = self.log_root.log
        return self | /restartus-2.5.0.1-py3-none-any.whl/build/lib/restart/src/model.py | 0.841337 | 0.257279 | model.py | pypi |
[](https://developer.cisco.com/codeexchange/github/repo/muhammad-rafi/restconf-cli)
[](https://github.com/muhammad-rafi/conf_diff/actions)
[](https://github.com/muhammad-rafi/conf_diff/actions)
[](https://pypi.org/project/restconf-cli/)
# RESTCONF Command Line Interface (restconf-cli)
## Introduction
restconf-cli is a command line interface application which interacts with restconf-enabled devices (i.e. iosxe, nxos, nso). This module uses the Python `Click` module for the command line interface (CLI) and the `rich` module for colorful output. It is built on top of the Python `requests` library to make Restconf API calls. Here is the key information for this module.
- Base URL used 'https://<hostname/ip>:<port>/restconf/data/'
- Default port used for the Restconf API 443
- Default Headers used for Accept and Content-Type are the following.
Accept: application/yang-data+json
Content-Type: application/yang-data+json
Since default headers are using 'application/yang-data+json', therefore, the output will be in following formats for the below type of devices unless specified in the table below.
| Device Type | IOSXE | NXOS | NSO |
| :----------------------------: | :----: | :---: | :---: |
| Default Accept Header | application/yang-data+json | application/yang-data+json | application/yang-data+json |
| Default Content-Type Header | application/yang-data+json | application/yang-data+json | application/yang-data+json |
| Default Output Format | JSON | XML | JSON |
The same applies to the POST, PUT and PATCH operations: if you do not specify the header fields, it is assumed that you are sending the data in the formats mentioned in the table above.
- Currently tested/supported on Python 3.8, 3.9 and 3.10
__Disclaimer:__ This module uses Insecure Requests which is not recommended, use
certificates where possible.
## Installation
You can download this module from PyPi repository via PIP
To install a module, simply type
```bash
pip install restconf-cli
```
__Note:__ It is also recommended to use a virtual environment for any package you are testing.
```bash
(main) expert@expert-cws:~/venvs$ python -m venv .venv
(main) expert@expert-cws:~/venvs$ source .venv/bin/activate
(.venv) expert@expert-cws:~/venvs$ pip install restconf-cli
```
## Usage
Once you have installed the `restconf-cli` package, you can test this against any Cisco IOSXE, NXOS and NSO device. I have not tested for any other devices, but if you come across any device where this is working or not, feel free to raise an issue or send a pull request.
Let's first explore the documentation
Run `restconf-cli --help` on the terminal for help text
```bash
(.venv) expert@expert-cws:~$ restconf-cli --help
Usage: restconf-cli [OPTIONS] COMMAND [ARGS]...
CLI tool to interact with the restconf APIs currently supported for IOSXE,
NXOS and NSO.
This library uses the following root URL for the restconf with port 443 as default port.
https://<hostname/ip>:<port>/restconf/data/
Default Headers for Accept and Content-Type are the following.
Accept: application/yang-data+json
Content-Type: application/yang-data+json
Since default headers are using 'application/yang-data+json',
therefore, you will retrieve the output in following formats
for the below type of devices unless specified for the GET operation.
| Device Type | IOSXE | NXOS | NSO |
| :-------------------: | :--------------------------: | :--------------------------: | :-------------------------: |
| Default Accept | application/yang-data+json | application/yang-data+json | application/yang-data+json |
| Default Content-Type | application/yang-data+json | application/yang-data+json | application/yang-data+json |
| Default Output Format | JSON | XML | JSON |
Same for the POST, PUT and PATCH operation if you do not specify the
header fields, it assumes you are sending the data in the formats
mentioned above.
Disclaimer: This module uses Insecure Requests which is not recommended, use
certificates where possible.
Options:
--help Show this message and exit.
Commands:
DELETE Method to delete the target resource Example: $ restconf-cli...
GET Method to retrieve operational or config data from the devices.
PATCH same as PUT, except if the resource does not exist, the devices...
POST Sends data to the devices to create a new data resource.
PUT Send data to the devices to create or update the data resource.
```
You can read all the above information, but this is more or less the same info that already exists in this readme file. The important piece of information here is the commands: as you can see, there are about 5 commands available, which are basically CRUD operations for the Restconf API. We can again use the `-h` or `--help` flag to see the information inside each of these commands. Let's check each of them and see what command options are available.
For GET operation command options, simply run `restconf-cli GET --help` or `restconf-cli GET -h`
```bash
(.venv) expert@expert-cws:~$ restconf-cli GET --help
Usage: restconf-cli GET [OPTIONS]
Method to retrieve operational or config data from the devices.
Default header for the requests are 'application/yang-data+json'
Example:
# Display output on the terminal
$ restconf-cli GET -u developer -n sandbox-iosxe-latest-1.cisco.com \
-p Cisco-IOS-XE-native:native/version \
-a application/yang-data+json \
-c application/yang-data+json
# Display output on the terminal and save the output on a file defined with --output or -o flag
$ restconf-cli GET -u developer -n sandbox-iosxe-latest-1.cisco.com \
-p Cisco-IOS-XE-native:native/interface \
-o output.json
Options:
-o, --output FILENAME Output will be written to a file
-c, --content-type TEXT Content-Type header for restconf api, default is
application/yang-data+json
-a, --accept TEXT Accept header for restconf api, default is
application/yang-data+json
-pn, --port INTEGER Port number for restconf api, default is 443
-p, --path TEXT Path for restconf api call [required]
--password TEXT Password for restconf api
-u, --username TEXT Username for restconf api [required]
-n, --hostname TEXT Device hostname or IP address for the restconf API
[required]
-h, --help Show this message and exit.
```
For POST operation command options, run `restconf-cli POST --help` or `restconf-cli POST -h`
```bash
(.venv) expert@expert-cws:~$ restconf-cli POST -h
Usage: restconf-cli POST [OPTIONS]
Sends data to the devices to create a new data resource.
Example:
# Configure via raw data for POST operation
$ restconf-cli POST -u developer -n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces \
-d '{
"interface":[
{
"name":"Loopback999",
"description":"created by python click - POST",
"type":"iana-if-type:softwareLoopback",
"enabled":true,
"ietf-ip:ipv4":{
"address":[
{
"ip":"10.0.1.10",
"netmask":"255.255.255.255"
}
]
}
}
]
}'
# Configure from file for POST operation
$ restconf-cli POST -u developer \
-n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces -ff interface.json
Options:
-ff, --from-file FILENAME Read the playload from file for POST operation
-d, --data TEXT Playload to be sent for POST, PUT and PATCH
methods
-c, --content-type TEXT Content-Type header for restconf api, default is
application/yang-data+json
-a, --accept TEXT Accept header for restconf api, default is
application/yang-data+json
-pn, --port INTEGER Port number for restconf api, default is 443
-p, --path TEXT Path for restconf api call [required]
--password TEXT Password for restconf api
-u, --username TEXT Username for restconf api [required]
-n, --hostname TEXT Device hostname or IP address for the restconf
API [required]
-h, --help Show this message and exit.
```
For PUT operation command options, run `restconf-cli PUT --help` or `restconf-cli PUT -h`
```bash
(.venv) expert@expert-cws:~$ restconf-cli PUT -h
Usage: restconf-cli PUT [OPTIONS]
Send data to the devices to create or update the data resource.
Example:
# Configure via raw data for PUT operation
$ restconf-cli PUT -u developer -n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces \
-d '{
"interface":[
{
"name":"Loopback999",
"description":"created by python click - PUT",
"type":"iana-if-type:softwareLoopback",
"enabled":true,
"ietf-ip:ipv4":{
"address":[
{
"ip":"10.0.1.10",
"netmask":"255.255.255.255"
}
]
}
}
]
}'
# Configure from file for PUT operation
$ restconf-cli PUT -u developer \
-n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces/interface=Loopback999 -ff interface.json
Options:
-ff, --from-file FILENAME Read the playload from file for PUT operation
-d, --data TEXT Playload to be sent for POST, PUT and PATCH
methods
-c, --content-type TEXT Content-Type header for restconf api, default is
application/yang-data+json
-a, --accept TEXT Accept header for restconf api, default is
application/yang-data+json
-pn, --port INTEGER Port number for restconf api, default is 443
-p, --path TEXT Path for restconf api call [required]
--password TEXT Password for restconf api
-u, --username TEXT Username for restconf api [required]
-n, --hostname TEXT Device hostname or IP address for the restconf
API [required]
-h, --help Show this message and exit.
```
For PATCH operation command options, run `restconf-cli PATCH --help` or `restconf-cli PATCH -h`
```bash
(.venv) expert@expert-cws:~$ restconf-cli PATCH -h
Usage: restconf-cli PATCH [OPTIONS]
same as PUT, except if the resource does not exist, the devices MUST NOT
create one.
Example:
# Configure via raw data for PATCH operation
$ restconf-cli PATCH -u developer -n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces \
-d '{
"interface":[
{
"name":"Loopback999",
"description":"created by python click - PATCH",
"type":"iana-if-type:softwareLoopback",
"enabled":true,
"ietf-ip:ipv4":{
"address":[
{
"ip":"10.0.1.10",
"netmask":"255.255.255.255"
}
]
}
}
]
}'
# Configure from file for PATCH operation
$ restconf-cli PATCH -u developer \
-n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces/interface=Loopback999 -ff interface.json
Options:
-ff, --from-file FILENAME Read the playload from file for PATCH operation
-d, --data TEXT Playload to be sent for POST, PUT and PATCH
methods
-c, --content-type TEXT Content-Type header for restconf api, default is
application/yang-data+json
-a, --accept TEXT Accept header for restconf api, default is
application/yang-data+json
-pn, --port INTEGER Port number for restconf api, default is 443
-p, --path TEXT Path for restconf api call [required]
--password TEXT Password for restconf api
-u, --username TEXT Username for restconf api [required]
-n, --hostname TEXT Device hostname or IP address for the restconf
API [required]
-h, --help Show this message and exit.
```
For DELETE operation command options, run `restconf-cli DELETE --help` or `restconf-cli DELETE -h`
```bash
(.venv) expert@expert-cws:~$ restconf-cli DELETE -h
Usage: restconf-cli DELETE [OPTIONS]
Method to delete the target resource
Example:
$ restconf-cli DELETE -u developer -n sandbox-iosxe-latest-1.cisco.com \
-p ietf-interfaces:interfaces/interface=Loopback999
Options:
-n, --hostname TEXT Device hostname or IP address for the restconf API
[required]
-u, --username TEXT Username for restconf api [required]
--password TEXT Password for restconf api
-p, --path TEXT Path for restconf api call [required]
-pn, --port INTEGER Port number for restconf api, default is 443
-a, --accept TEXT Accept header for restconf api, default is
application/yang-data+json
-c, --content-type TEXT Content-Type header for restconf api, default is
application/yang-data+json
-h, --help Show this message and exit.
```
Notice there are some examples mentioned in the above output for every command, we will explore these in the next section.
## Examples
For the sake of testing, I am going to use the Cisco Always-On IOSXE device `sandbox-iosxe-latest-1.cisco.com`, which uses the default port `443` for Restconf, and both the Accept and Content-Type headers are `application/yang-data+json`, which makes our CLI command easier as these options are the defaults for `restconf-cli`.
```
restconf-cli GET -u developer -n sandbox-iosxe-latest-1.cisco.com -p Cisco-IOS-XE-native:native/version
```
```bash
(.venv) expert@expert-cws:~$ restconf-cli GET -u developer -n sandbox-iosxe-latest-1.cisco.com -p Cisco-IOS-XE-native:native/version
Password:
{
"Cisco-IOS-XE-native:version": "17.3"
}
Status: 200 OK
```
The output will be colorful as `restconf-cli` cli uses the rich module to print colorful output.
For more examples, please check the [examples](examples) folder.
## Issues
Please raise an issue or pull request if you find something wrong with this module.
## Authors
[Muhammad Rafi](https://www.linkedin.com/in/muhammad-rafi-0a37a248/)
## References
https://click.palletsprojects.com/en/8.1.x/
https://click.palletsprojects.com/en/7.x/changelog/#version-7-1-2
| /restconf-cli-0.1.5.tar.gz/restconf-cli-0.1.5/README.md | 0.594669 | 0.795698 | README.md | pypi |
import click
import requests
from rich import print
# Disable InsecureRequestWarning: Unverified HTTPS request is being made.
requests.packages.urllib3.disable_warnings()
# Creating click group as parent function for other click commands to be attached with
@click.group()
def restconf_cli():
    """ CLI tool to interact with the restconf APIs currently supported
    for IOSXE, NXOS and NSO.
    \b
    This library uses the following root URL for the restconf with port 443 as default port.
    https://<hostname/ip>:<port>/restconf/data/
    Default Headers for Accept and Content-Type are the following. \n
    \b
    Accept: application/yang-data+json
    Content-Type: application/yang-data+json
    \b
    Since default headers are using 'application/yang-data+json',
    therefore, you will retrieve the output in following formats
    for the below type of devices unless specified for the GET operation.
    \b
    | Device Type           | IOSXE                        | NXOS                         | NSO                         |
    | :-------------------: | :--------------------------: | :--------------------------: | :-------------------------: |
    | Default Accept        | application/yang-data+json   | application/yang-data+json   | application/yang-data+json  |
    | Default Content-Type  | application/yang-data+json   | application/yang-data+json   | application/yang-data+json  |
    | Default Output Format | JSON                         | XML                          | JSON                        |
    \b
    Same for the POST, PUT and PATCH operation if you do not specify the
    header fields, it assumes you are sending the data in the formats
    mentioned above.
    \b
    Disclaimer: This module uses Insecure Requests which is not recommended, use
    certificates where possible.
    """
    # NOTE: the docstring above is rendered verbatim by click as the top-level
    # help text ('\b' disables click's paragraph re-wrapping for that section).
    # The group itself holds no shared state; it only dispatches to sub-commands.
    pass
# Click command 'GET'
@click.option("--hostname", "-n", type=str, required=True, help="Device hostname or IP address for the restconf API")
@click.option("--username", "-u", type=str, required=True, help="Username for restconf api")
@click.option('--password', prompt=True, hide_input=True, confirmation_prompt=False, help="Password for restconf api")
@click.option("--path", "-p", type=str, required=True, help="Path for restconf api call")
@click.option("--port", "-pn", type=int, required=False, default=443, help="Port number for restconf api, default is 443")
@click.option("--accept", "-a", type=str, required=False, default='application/yang-data+json', help="Accept header for restconf api, default is application/yang-data+json")
@click.option("--content-type", "-c", type=str, required=False, default='application/yang-data+json', help="Content-Type header for restconf api, default is application/yang-data+json")
@click.option("--output", "-o", type=click.File('w'), required=False, default=None, help="Output will be written to a file")
@click.command(name="GET", context_settings=dict(help_option_names=['-h', '--help']))
def restconf_get(hostname, username, password, path, port, accept, content_type, output):
"""
Method to retrieve operational or config data from the devices. \n
Default header for the requests are 'application/yang-data+json'
Examples:\n
\b
# Display output on the terminal \b
$ restconf-cli GET -u developer -n sandbox-iosxe-latest-1.cisco.com \ \b
-p Cisco-IOS-XE-native:native/version \ \b
-a application/yang-data+json \ \b
-c application/yang-data+json \b
\b
# Display output on the terminal and save the output on a file defined with --output or -o flag \b
$ restconf-cli GET -u developer -n sandbox-iosxe-latest-1.cisco.com \ \b
-p Cisco-IOS-XE-native:native/interface \ \b
-o interfaces.json
"""
try:
headers = {'Accept': accept, 'Content-Type': content_type}
url = f"https://{hostname}:{port}/restconf/data/{path}"
response = requests.get(url,
auth=(username, password),
headers=headers,
verify=False)
if response.status_code == 200:
click.echo(print(f'{response.text}'))
click.echo(print(f"\nStatus: {response.status_code} OK"))
if output:
click.echo(f'{response.text}', file=output)
click.echo(print(f'Output has been saved to a file "{output.name}"\n'))
else:
click.echo(print(f"\nRequest Failed: {response}"))
except requests.RequestException as e:
click.echo(print(e))
# Click command 'POST'
@click.option("--hostname", "-n", type=str, required=True, help="Device hostname or IP address for the restconf API")
@click.option("--username", "-u", type=str, required=True, help="Username for restconf api")
@click.option('--password', prompt=True, hide_input=True, confirmation_prompt=False, help="Password for restconf api")
@click.option("--path", "-p", type=str, required=True, help="Path for restconf api call")
@click.option("--port", "-pn", type=int, required=False, default=443, help="Port number for restconf api, default is 443")
@click.option("--accept", "-a", type=str, required=False, default='application/yang-data+json', help="Accept header for restconf api, default is application/yang-data+json")
@click.option("--content-type", "-c", type=str, required=False, default='application/yang-data+json', help="Content-Type header for restconf api, default is application/yang-data+json")
@click.option("--data", "-d", type=str, default='', help="Playload to be sent for POST, PUT and PATCH methods")
@click.option("--from-file", "-ff", type=click.File('r'), required=False, default=None, help="Read the playload from file for POST operation")
@click.command(name="POST", context_settings=dict(help_option_names=['-h', '--help']))
def restconf_post(hostname, username, password, path, port, data, from_file, accept, content_type):
'''
Sends data to the devices to create a new data resource.\n
\b
Example:
\b
# Configure via raw data for POST operation
$ restconf-cli POST -u developer -n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces \ \b
-d '{
"interface":[
{
"name":"Loopback999",
"description":"created by python click - POST",
"type":"iana-if-type:softwareLoopback",
"enabled":true,
"ietf-ip:ipv4":{
"address":[
{
"ip":"10.0.1.10",
"netmask":"255.255.255.255"
}
]
}
}
]
}'
\b
# Configure from file for POST operation
$ restconf-cli POST -u developer \ \b
-n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces -ff interface.json
'''
try:
headers = {'Accept': accept, 'Content-Type': content_type}
url = f"https://{hostname}:{port}/restconf/data/{path}"
if from_file:
payload = from_file.read()
else:
payload = data
response = requests.post(url,
auth=(username, password),
headers=headers,
data=payload,
verify=False)
if response.status_code == 201:
click.echo(print(f"\nResource has been created successfully: {response.status_code} OK"))
else:
click.echo(print(f"\nRequest Failed: {response}"))
except requests.RequestException as e:
click.echo(print(e))
# Click command 'PUT'
@click.option("--hostname", "-n", type=str, required=True, help="Device hostname or IP address for the restconf API")
@click.option("--username", "-u", type=str, required=True, help="Username for restconf api")
@click.option('--password', prompt=True, hide_input=True, confirmation_prompt=False, help="Password for restconf api")
@click.option("--path", "-p", type=str, required=True, help="Path for restconf api call")
@click.option("--port", "-pn", type=int, required=False, default=443, help="Port number for restconf api, default is 443")
@click.option("--accept", "-a", type=str, required=False, default='application/yang-data+json', help="Accept header for restconf api, default is application/yang-data+json")
@click.option("--content-type", "-c", type=str, required=False, default='application/yang-data+json', help="Content-Type header for restconf api, default is application/yang-data+json")
@click.option("--data", "-d", type=str, default='', help="Playload to be sent for POST, PUT and PATCH methods")
@click.option("--from-file", "-ff", type=click.File('r'), required=False, default=None, help="Read the playload from file for PUT operation")
@click.command(name="PUT", context_settings=dict(help_option_names=['-h', '--help']))
def restconf_put(hostname, username, password, path, port, data, from_file, accept, content_type):
'''
Send data to the devices to create or update the data resource.
\b
Example:
\b
# Configure via raw data for PUT operation
$ restconf-cli PUT -u developer -n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces \ \b
-d '{
"interface":[
{
"name":"Loopback999",
"description":"created by python click - PUT",
"type":"iana-if-type:softwareLoopback",
"enabled":true,
"ietf-ip:ipv4":{
"address":[
{
"ip":"10.0.1.10",
"netmask":"255.255.255.255"
}
]
}
}
]
}'
\b
# Configure from file for PUT operation
$ restconf-cli PUT -u developer \ \b
-n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces/interface=Loopback999 -ff interface.json
'''
try:
headers = {'Accept': accept, 'Content-Type': content_type}
url = f"https://{hostname}:{port}/restconf/data/{path}"
if from_file:
payload = from_file.read()
else:
payload = data
response = requests.put(url,
auth=(username, password),
headers=headers,
data=payload,
verify=False)
if response.status_code == 204:
click.echo(print(f"\nResource has been created/updated successfully: {response.status_code} OK"))
else:
click.echo(print(f"\nRequest Failed: {response}"))
except requests.RequestException as e:
click.echo(print(e))
# Click command 'PATCH'
@click.option("--hostname", "-n", type=str, required=True, help="Device hostname or IP address for the restconf API")
@click.option("--username", "-u", type=str, required=True, help="Username for restconf api")
@click.option('--password', prompt=True, hide_input=True, confirmation_prompt=False, help="Password for restconf api")
@click.option("--path", "-p", type=str, required=True, help="Path for restconf api call")
@click.option("--port", "-pn", type=int, required=False, default=443, help="Port number for restconf api, default is 443")
@click.option("--accept", "-a", type=str, required=False, default='application/yang-data+json', help="Accept header for restconf api, default is application/yang-data+json")
@click.option("--content-type", "-c", type=str, required=False, default='application/yang-data+json', help="Content-Type header for restconf api, default is application/yang-data+json")
@click.option("--data", "-d", type=str, default='', help="Playload to be sent for POST, PUT and PATCH methods")
@click.option("--from-file", "-ff", type=click.File('r'), required=False, default=None, help="Read the playload from file for PATCH operation")
@click.command(name="PATCH", context_settings=dict(help_option_names=['-h', '--help']))
def restconf_patch(hostname, username, password, path, port, data, from_file, accept, content_type):
'''
same as PUT, except if the resource does not exist,
the devices MUST NOT create one.
\b
Example:
\b
# Configure via raw data for PATCH operation
$ restconf-cli PATCH -u developer -n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces \ \b
-d '{
"interface":[
{
"name":"Loopback999",
"description":"created by python click - PATCH",
"type":"iana-if-type:softwareLoopback",
"enabled":true,
"ietf-ip:ipv4":{
"address":[
{
"ip":"10.0.1.10",
"netmask":"255.255.255.255"
}
]
}
}
]
}'
\b
# Configure from file for PATCH operation
$ restconf-cli PATCH -u developer \ \b
-n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces/interface=Loopback999 -ff interface.json
'''
try:
headers = {'Accept': accept, 'Content-Type': content_type}
url = f"https://{hostname}:{port}/restconf/data/{path}"
if from_file:
payload = from_file.read()
else:
payload = data
response = requests.patch(url,
auth=(username, password),
headers=headers,
data=payload,
verify=False)
if response.status_code == 204:
click.echo(print(f"\nResource has been updated successfully: {response.status_code} OK"))
else:
click.echo(print(f"\nRequest Failed: {response}"))
except requests.RequestException as e:
click.echo(print(e))
# Click command 'DELETE'
@click.command(name="DELETE", context_settings=dict(help_option_names=['-h', '--help']))
@click.option("--hostname", "-n", type=str, required=True, help="Device hostname or IP address for the restconf API")
@click.option("--username", "-u", type=str, required=True, help="Username for restconf api")
@click.option('--password', prompt=True, hide_input=True, confirmation_prompt=False, help="Password for restconf api")
@click.option("--path", "-p", type=str, required=True, help="Path for restconf api call")
@click.option("--port", "-pn", type=int, required=False, default=443, help="Port number for restconf api, default is 443")
@click.option("--accept", "-a", type=str, required=False, default='application/yang-data+json', help="Accept header for restconf api, default is application/yang-data+json")
@click.option("--content-type", "-c", type=str, required=False, default='application/yang-data+json', help="Content-Type header for restconf api, default is application/yang-data+json")
def restconf_delete(hostname, username, password, path, port, accept, content_type):
'''
Method to delete the target resource
\b
Example:
\b
$ restconf-cli DELETE -u developer -n sandbox-iosxe-latest-1.cisco.com \ \b
-p ietf-interfaces:interfaces/interface=Loopback999
'''
try:
headers = {'Accept': accept, 'Content-Type': content_type}
url = f"https://{hostname}:{port}/restconf/data/{path}"
response = requests.delete(url,
auth=(username, password),
headers=headers,
verify=False)
if response.status_code == 204:
click.echo(print(f"\nResource has been deleted: {response.status_code} OK"))
else:
click.echo(print(f"\nRequest Failed: {response}"))
except requests.RequestException as e:
click.echo(print(e))
# Register every sub-command on the parent click group.
for _command in (restconf_get, restconf_post, restconf_put,
                 restconf_patch, restconf_delete):
    restconf_cli.add_command(_command)
if __name__ == '__main__':
    restconf_cli()
class HTTPException(Exception):
    """Exception that's thrown when an HTTP request operation fails.
    Attributes
    ------------
    response: :class:`aiohttp.ClientResponse`
        The response of the failed HTTP request. This is an
        instance of :class:`aiohttp.ClientResponse`. In some cases
        this could also be a :class:`requests.Response`.
    text: :class:`str`
        The text of the error. Could be an empty string.
    status: :class:`int`
        The status code of the HTTP request.
    code: :class:`int`
        The Discord specific error code for the failure.
    """
    def __init__(self, response, message):
        self.response = response
        self.status = response.status
        if isinstance(message, dict):
            self.code = message.get('code', 0)
            base = message.get('message', '')
            errors = message.get('errors')
            if errors:
                # Collapse the nested 'errors' tree into readable
                # "In <dotted.path>: <message>" lines appended to the base text.
                errors = self.flatten_dict(errors)
                helpful = '\n'.join(f'In {loc}: {msg}' for loc, msg in errors.items())
                self.text = base + '\n' + helpful
            else:
                self.text = base
        else:
            # Plain-text payload: no API-specific error code available.
            self.text = message
            self.code = 0
        fmt = '{0.status} {0.reason} (error code: {1})'
        # Idiomatic truthiness check (was `if len(self.text):`).
        if self.text:
            fmt = fmt + ': {2}'
        super().__init__(fmt.format(self.response, self.code, self.text))
    def flatten_dict(self, d, key=''):
        """Flatten a nested error dict into {'dotted.path': message} pairs.
        A nested dict containing an '_errors' list is a leaf: its messages
        are joined into a single string under the current dotted path.
        """
        items = []
        for k, v in d.items():
            new_key = key + '.' + k if key else k
            if isinstance(v, dict):
                try:
                    _errors = v['_errors']
                except KeyError:
                    # Plain nested dict: recurse with the extended path.
                    items.extend(self.flatten_dict(v, new_key).items())
                else:
                    items.append((new_key, ' '.join(x.get('message', '') for x in _errors)))
            else:
                items.append((new_key, v))
        return dict(items)
class BadRequest(HTTPException):
    """Raised when the API responds with HTTP status code 400."""
class Forbidden(HTTPException):
    """Raised when the API responds with HTTP status code 403."""
class NotFound(HTTPException):
    """Raised when the API responds with HTTP status code 404."""
class RateLimited(HTTPException):
    """Exception that's thrown for when status code 429 occurs."""
    # Extra attributes carried on top of HTTPException's.
    __slots__ = ('retry_after', 'is_global')
    def __init__(self, response, message):
        super().__init__(response, message)
        # NOTE(review): division assumes 'retry_after' arrives in milliseconds;
        # newer Discord API versions return seconds -- confirm against the API
        # version this client targets.
        self.retry_after = message['retry_after'] / 1000.0
        # 'global' marks an all-route (global) rate limit; absent means False.
        self.is_global = message.get('global', False)
class InternalServerError(HTTPException):
    """Raised when the API responds with HTTP status code 500."""
class BadGateway(HTTPException):
    """Exception that's thrown for when status code 502 occurs."""
    pass
import random
from factory import Factory, Faker, LazyAttribute, SubFactory, fuzzy
from pydantic import HttpUrl
from restcountries_cli import constants, models
class CountryNameFactory(Factory):
    """
    Factory for :class:`models.CountryName` test instances.
    Generates random common/official names and mirrors them into an
    English ``native_name`` entry so the two stay consistent.
    """
    common: str = fuzzy.FuzzyText()
    official: str = fuzzy.FuzzyText()
    # Reuse the generated common/official values so native_name agrees with them.
    native_name: dict = LazyAttribute(
        lambda country_name: {
            "en": {
                "common": country_name.common,
                "official": country_name.official,
            }
        }
    )
    class Meta:
        model = models.CountryName
class CurrencyFactory(Factory):
    """
    Factory for :class:`models.Currency` test instances.
    """
    name: str = fuzzy.FuzzyText()
    # Fixed euro sign; override in tests that need a different symbol.
    symbol: str = "€"
    class Meta:
        model = models.Currency
class IDDFactory(Factory):
    """
    Factory for :class:`models.IDD` (international direct dialling) instances.
    """
    root: str = fuzzy.FuzzyText()
    # Empty by default; tests that need dialling suffixes must override this.
    # NOTE(review): class-level mutable default -- confirm factory_boy does not
    # share the same list across built instances.
    suffixes: list[str] = []
    class Meta:
        model = models.IDD
class LanguageFactory(Factory):
    """
    Factory for :class:`models.Language` test instances.
    """
    # Three-character random code (ISO-639-style length; content is random text).
    code: str = fuzzy.FuzzyText(length=3)
    name: str = fuzzy.FuzzyText()
    class Meta:
        model = models.Language
class TranslationFactory(Factory):
    """
    Factory for :class:`models.Translation` test instances
    (official and common translated country names).
    """
    official: str = fuzzy.FuzzyText()
    common: str = fuzzy.FuzzyText()
    class Meta:
        model = models.Translation
class DemonymFactory(Factory):
    """
    Factory for :class:`models.Demonym` test instances.
    """
    # f/m: presumably feminine/masculine demonym forms -- confirm against
    # the models.Demonym field definitions.
    f: str = fuzzy.FuzzyText()
    m: str = fuzzy.FuzzyText()
    class Meta:
        model = models.Demonym
class MapsFactory(Factory):
    """
    Factory for :class:`models.Maps` test instances (random map URLs).
    """
    google_maps: HttpUrl = Faker("url")
    open_street_maps: HttpUrl = Faker("url")
    class Meta:
        model = models.Maps
class CarFactory(Factory):
    """
    Factory for :class:`models.Car` test instances.
    """
    # 0 to 3 random signs; note the list may legitimately be empty.
    signs: list[str] = LazyAttribute(lambda _: [fuzzy.FuzzyText().fuzz() for _ in range(0, random.randint(0, 3))])
    side: str = fuzzy.FuzzyText()
    class Meta:
        model = models.Car
class FlagFactory(Factory):
    """
    Factory for :class:`models.Flag` test instances
    (PNG/SVG URLs plus alt text).
    """
    png: HttpUrl = Faker("url")
    svg: HttpUrl = Faker("url")
    alt: str = fuzzy.FuzzyText()
    class Meta:
        model = models.Flag
class CoatOfArmFactory(Factory):
    """
    Factory for :class:`models.CoatOfArm` test instances (PNG/SVG URLs).
    """
    png: HttpUrl = Faker("url")
    svg: HttpUrl = Faker("url")
    class Meta:
        model = models.CoatOfArm
class CapitalInfoFactory(Factory):
    """
    Factory for :class:`models.CapitalInfo` test instances.
    """
    # NOTE(review): both coordinates are drawn from [-180, 180], so latitudes
    # beyond +/-90 are possible -- confirm models.CapitalInfo does not
    # validate the latitude range.
    latlng: list[float] = LazyAttribute(lambda _: [random.randint(-180, 180), random.randint(-180, 180)])
    class Meta:
        model = models.CapitalInfo
class CountryFactory(Factory):
    """
    Main country factory to use in tests.

    Builds a fully populated ``models.Country`` with randomized but
    structurally coherent data (e.g. the tld is derived from the
    generated cca2 code).
    """
    name: models.CountryName = SubFactory(CountryNameFactory)
    # Derive the top-level domain from this country's own cca2 code.
    tld: list[str] = LazyAttribute(lambda country: [f".{country.cca2.lower()}"])
    cca2: str = fuzzy.FuzzyText(length=2)
    ccn3: str = LazyAttribute(lambda _: str(random.randint(1, 999)))
    cca3: str = fuzzy.FuzzyText(length=3)
    cioc: str = fuzzy.FuzzyText(length=3)
    independent: bool = Faker("boolean")
    status: str = fuzzy.FuzzyChoice(constants.VALID_COUNTRIES_STATUSES)
    un_member: bool = Faker("boolean")
    # 1-3 currencies keyed by a random 3-letter uppercase code.
    currencies: dict[str, models.Currency] = LazyAttribute(
        lambda _: {
            fuzzy.FuzzyText(length=3).fuzz().upper(): CurrencyFactory.build() for _ in range(random.randint(1, 3))
        }
    )
    idd: models.IDD = SubFactory(IDDFactory)
    capital: list[str] = LazyAttribute(lambda _: [fuzzy.FuzzyText().fuzz()])
    alt_spellings: list[str] = LazyAttribute(lambda _: [fuzzy.FuzzyText().fuzz()])
    region: str = fuzzy.FuzzyChoice(constants.VALID_REGIONS)
    subregion: str = fuzzy.FuzzyChoice(constants.VALID_SUBREGIONS)
    languages: list[models.Language] = LazyAttribute(lambda _: [LanguageFactory() for _ in range(random.randint(1, 3))])
    translations: dict[str, models.Translation] = LazyAttribute(
        lambda _: {
            fuzzy.FuzzyText(length=3).fuzz().upper(): TranslationFactory.build() for _ in range(random.randint(1, 3))
        }
    )
    # latlng is [latitude, longitude] (see models.CapitalInfo properties):
    # latitude is bounded by +/-90, longitude by +/-180; uniform() yields
    # real floats instead of randint's out-of-range integers.
    latlng: list[float] = LazyAttribute(lambda _: [random.uniform(-90, 90), random.uniform(-180, 180)])
    landlocked: bool = Faker("boolean")
    borders: list[str] = LazyAttribute(
        lambda _: [fuzzy.FuzzyText(length=3).fuzz().upper() for _ in range(random.randint(1, 3))]
    )
    area: float = fuzzy.FuzzyFloat(low=0)
    demonyms: dict[str, models.Demonym] = LazyAttribute(
        lambda _: {
            fuzzy.FuzzyText(length=3).fuzz().lower(): DemonymFactory.build() for _ in range(random.randint(1, 3))
        }
    )
    flag: str = fuzzy.FuzzyText(length=1)
    maps: models.Maps = SubFactory(MapsFactory)
    population: int = fuzzy.FuzzyInteger(low=0)
    gini: dict[str, float] = LazyAttribute(
        lambda _: {
            str(fuzzy.FuzzyInteger(low=0).fuzz()): fuzzy.FuzzyFloat(low=0).fuzz() for _ in range(random.randint(1, 3))
        }
    )
    fifa: str = fuzzy.FuzzyText()
    car: models.Car = SubFactory(CarFactory)
    timezones: list[str] = LazyAttribute(
        lambda _: [fuzzy.FuzzyText(length=3).fuzz().upper() for _ in range(random.randint(1, 3))]
    )
    continents: list[str] = LazyAttribute(lambda _: random.choices(constants.VALID_CONTINENTS, k=random.randint(1, 3)))
    flags: models.Flag = SubFactory(FlagFactory)
    coat_of_arms: models.CoatOfArm = SubFactory(CoatOfArmFactory)
    start_of_week: str = fuzzy.FuzzyChoice(choices=constants.VALID_START_OF_THE_WEEK)
    capital_info: models.CapitalInfo = SubFactory(CapitalInfoFactory)
    postal_code: dict[str, str] = LazyAttribute(
        lambda _: {
            "format": fuzzy.FuzzyText().fuzz(),
            "regex": fuzzy.FuzzyText().fuzz(),
        }
    )
    class Meta:
        model = models.Country
from pydantic import BaseModel, HttpUrl
class CountryBaseName(BaseModel):
    """
    Helper pydantic model that has common fields for the native name and the country name.
    """
    common: str  # everyday / colloquial name
    official: str  # formal, official name
class CountryName(CountryBaseName):
    """
    Model for the country name.

    Extends the base name pair with native-language variants, keyed by
    language code (presumably ISO 639-3 -- TODO confirm against the API).
    """
    native_name: dict[str, CountryBaseName]
class Currency(BaseModel):
    """
    Currency of a country.
    """
    name: str
    symbol: str | None = ""  # Countries like Bosnia and Herzegovina have no symbol
class IDD(BaseModel):
    """
    International Direct Dialing of the country.
    """
    # Both fields may be missing in the API payload, hence the None defaults.
    root: str | None = None
    suffixes: list[str] | None = []
class Language(BaseModel):
    """
    Language of a country.
    """
    code: str  # language code as used in the API's `languages` mapping keys
    name: str
class Translation(BaseModel):
    """
    Model for a concrete translation of a country.
    """
    official: str  # translated official name
    common: str  # translated common name
class Demonym(BaseModel):
    """
    Demonym in a language for the country.
    """
    f: str  # feminine form
    m: str  # masculine form
class Maps(BaseModel):
    """
    URLs to the map addresses of the countries.
    """
    google_maps: HttpUrl
    open_street_maps: HttpUrl
class Car(BaseModel):
    """
    Driving-related information of the country.

    Mirrors the REST Countries `car` object: distinguishing signs and the
    side of the road driven on -- TODO confirm the exact `side` values
    against the API.
    """
    signs: list[str] | None = []  # Countries like Aruba have no signs
    side: str
class CountryImageBase(BaseModel):
    """
    Helper model for images.

    Both formats are optional: the API may provide either, both, or none.
    """
    png: HttpUrl | None = None
    svg: HttpUrl | None = None
class Flag(CountryImageBase):
    """
    Flag of the country model.
    """
    alt: str | None = None  # textual description of the flag, when available
class CoatOfArm(CountryImageBase):
    """
    Coat of arms information of the country.

    Adds nothing over the image base; kept as a distinct type for clarity.
    """
class CapitalInfo(BaseModel):
    """
    Capital information about the country.
    """
    # [latitude, longitude]; defaults to [None, None] when unknown.
    # NOTE(review): the properties below index positions 0 and 1 and will
    # raise IndexError if the API ever returns a shorter list -- confirm.
    latlng: list[float | None] = [None, None]
    @property
    def latitude(self) -> float | None:
        """
        Property to return only the latitude (first element of latlng).
        :return:
        """
        return self.latlng[0]
    @property
    def longitude(self) -> float | None:
        """
        Property to return only the longitude (second element of latlng).
        :return:
        """
        return self.latlng[1]
class Country(CapitalInfo):
    """
    Country model with all the information and validations with pydantic.

    Inherits `latlng` (and the latitude/longitude properties) from
    CapitalInfo for the country-level coordinates.
    """
    name: CountryName
    tld: list[str] | None
    cca2: str  # 2-letter country code; also used as the identity key in __eq__
    ccn3: str | None
    cca3: str
    cioc: str | None
    independent: bool | None
    status: str
    un_member: bool
    currencies: dict[str, Currency]
    idd: IDD  # International Direct Dialing
    # A country can have multiple capitals or none as Antarctica
    capital: list[str] | None
    alt_spellings: list[str]
    region: str
    subregion: str | None  # Antarctica, for example, has no subregion
    languages: list[Language]
    translations: dict[str, Translation]
    landlocked: bool
    borders: list[str] | None
    area: float
    demonyms: dict[str, Demonym]
    flag: str  # emoji flag character
    maps: Maps
    population: int
    gini: dict[str, float] | None
    fifa: str | None
    car: Car
    timezones: list[str]
    continents: list[str]
    flags: Flag
    coat_of_arms: CoatOfArm | None
    start_of_week: str
    capital_info: CapitalInfo
    postal_code: dict[str, str] = {"format": "", "regex": ""}
    @property
    def valid_names(self) -> list:
        """
        Property that returns all the valid names of the countries in lowercase.
        These names are the common and the official name and the native common and official names for all the languages
        present in the native name dict.
        :return:
        """
        return (
            [
                self.name.common.lower(),
                self.name.official.lower(),
            ]
            + [base_name.common.lower() for base_name in self.name.native_name.values()]
            + [base_name.official.lower() for base_name in self.name.native_name.values()]
        )
    def __eq__(self, other: object) -> bool:
        """
        2 Countries will be the same if their cca2 are the same.

        NOTE(review): defining __eq__ without __hash__ makes instances
        unhashable (Python sets __hash__ to None) -- confirm that Country
        is never used in sets/dict keys.
        :param other:
        :return:
        """
        if not isinstance(other, Country):  # Avoid violate the Liskov substitution principle
            return NotImplemented
        return self.cca2 == other.cca2
import os.path
import uuid
import requests_cache
from restcountries_cli.exceptions import APIException, NotValidEndpoint
from restcountries_cli.models import Country
class RestCountriesCli:
"""
Main client to obtain the values of the countries from restcountries
"""
def __init__(
self,
base_url: str = "https://restcountries.com",
version: str = "v3.1",
cached_session: bool = True,
cache_name: str | None = None,
):
"""
Initialization of the cli. It will start the request session and set the main
parameters to call the API.
:param base_url: Url to call where the API is hosted.
:param version: Version to use in the API call.
:param cached_session: Bool that shows if the calls are cached or not.
"""
# Base parameters for the client to work
self.base_url = base_url
self.version = version
self.url = f"{base_url}/{version}"
self.cached_session = cached_session
self.cache_name = cache_name if cache_name else uuid.uuid4().hex
# If the session is cached, it will only call the API once
if self.cached_session:
self.session = requests_cache.CachedSession(cache_name=self.cache_name)
else:
self.session = requests_cache.OriginalSession() # type: ignore
# Store the countries in the cli internally to avoid unnecessary calls
self.countries: list[Country] = []
# Endpoints to use
self.all_endpoint = "all"
self.name_endpoint = "name"
@staticmethod
def parse_country(country: dict) -> Country:
"""
Country obtained from the API call. It is transformed into a dict from a json.
:param country: The Country object using the model.
:return:
"""
# Correct some corner cases like Finland for open_street_maps
open_street_maps = country.get("maps", {}).get("openStreetMaps")
if "openstreetmap.org" in open_street_maps and "http" not in open_street_maps:
open_street_maps = f"https://www.{open_street_maps}"
return Country(
name={
"common": country.get("name", {}).get("common"),
"official": country.get("name", {}).get("official"),
"native_name": country.get("name", {}).get("nativeName", {}),
},
tld=country.get("tld"),
cca2=country.get("cca2"),
ccn3=country.get("ccn3"),
cca3=country.get("cca3"),
cioc=country.get("cioc"),
independent=country.get("independent"),
status=country.get("status"),
un_member=country.get("unMember"),
currencies=country.get("currencies", {}),
idd=country.get("idd"),
capital=country.get("capital"),
alt_spellings=country.get("altSpellings"),
region=country.get("region"),
subregion=country.get("subregion"),
languages=[{"code": code, "name": name} for code, name in country.get("languages", {}).items()],
translations=country.get("translations", {}),
latlng=country.get("latlng"),
landlocked=country.get("landlocked"),
borders=country.get("borders"),
area=country.get("area"),
demonyms=country.get("demonyms", {}),
flag=country.get("flag"),
maps={
"google_maps": country.get("maps", {}).get("googleMaps"),
"open_street_maps": open_street_maps,
},
population=country.get("population"),
gini=country.get("gini"),
fifa=country.get("fifa"),
car=country.get("car"),
timezones=country.get("timezones"),
continents=country.get("continents"),
flags=country.get("flags"),
coat_of_arms=country.get("coatOfArms"),
start_of_week=country.get("startOfWeek"),
capital_info=country.get("capitalInfo"),
)
def __del__(self):
"""
Make sure that when the cli is destroyed, the cached file is deleted if it exists
:return:
"""
self.clean_cache_file()
def clean_cache_file(self):
"""
Clean the cache by removing the cache file if it does exist.
:return:
"""
if os.path.exists(f"{self.cache_name}.sqlite"):
os.remove(f"{self.cache_name}.sqlite")
def refresh_cached_session(self):
"""
Refresh the cached session if it was cached.
If this is the case, it is also cleaning the old cache file.
:return:
"""
if self.cached_session:
self.clean_cache_file()
self.cache_name = uuid.uuid4().hex
self.session = requests_cache.CachedSession(cache_name=self.cache_name)
self.countries = []
def all(self) -> list[Country]:
"""
Get all the countries calling the endpoint "all" of the API.
:return:
"""
countries = []
response = self.session.get(f"{self.url}/{self.all_endpoint}")
match response.status_code:
case 200:
# If the response is ok, we parse all the countries
for country in response.json():
country = self.parse_country(country=country)
countries.append(country)
case 404:
raise NotValidEndpoint(f"url {self.url}/{self.all_endpoint} is not valid")
case 500:
raise APIException(f"url {self.url}/{self.all_endpoint} is experienced an internal error")
case _:
raise APIException(
f"url {self.url}/{self.all_endpoint} is experienced an error with "
f"code: '{response.status_code}' and body: '{response.json()}'"
)
self.countries = countries
return self.countries
def country_name(self, country_name: str, full_name: bool = False, force_query: bool = False) -> Country:
"""
Get a country based on the name, calling the endpoint "name" of the API.
It can be just by name or using the full name.
Using full name will search for exact values, it can be the common or official
value. This will add the query-parameter "fullText"
If the country is in the internal cli list, it will not make the call to the API.
:param force_query:
:param country_name:
:param full_name:
:return:
"""
# first of all, check if the country is in the country internal cli list
if (
cached_country := [country for country in self.countries if (country_name.lower() in country.valid_names)]
) and not force_query:
return cached_country[0]
# Get the country and append it to the country list
query = f"{self.url}/{self.name_endpoint}/{country_name}"
if full_name:
query += "/fullText=true"
response = self.session.get(query)
match response.status_code:
case 200:
# If the response is ok, we parse all the countries
country = self.parse_country(country=response.json()[0])
self.countries.append(country)
return country
case 404:
raise NotValidEndpoint(f"url {self.url}/{self.name_endpoint} is not valid")
case 500:
raise APIException(f"url {self.url}/{self.name_endpoint} is experienced an internal " f"error")
case _:
raise APIException(
f"url {self.url}/{self.name_endpoint} returned unexpected code " f"'{response.status_code}'"
) | /restcountries_cli-0.0.1.tar.gz/restcountries_cli-0.0.1/restcountries_cli/cli.py | 0.696784 | 0.196961 | cli.py | pypi |
from ..utils import Decorators
class CollectionAPI:
    """
    The API calls defined here are used to make calls to your collection in your database.
    """
    TEMPLATE_URL_COLLECTION = "https://{}.restdb.io/rest/{}"
    TEMPLATE_URL_COLLECTION_ID = "https://{}.restdb.io/rest/{}/{}"
    TEMPLATE_URL_COLLECTION_ID_SUBCOLLECTION = "https://{}.restdb.io/rest/{}/{}/{}"
    TEMPLATE_URL_COLLECTION_ID_SUBCOLLECTION_ID = "https://{}.restdb.io/rest/{}/{}/{}/{}"
    def __init__(self, dburl, x_apikey):
        """
        Parameters:
        dburl --- represents the beginning of the url to your database. Usually the same as your database name on your dashboard.
        x_apikey --- represents the API key you will be using to access your database. Go to your Database Settings and choose API to see your API key.
        """
        self.dburl = dburl
        self.x_apikey = x_apikey
        self.header = {"x-apikey": self.x_apikey}
    @Decorators.url_to_get
    def get_records_from_collection(self, collection_name, q=None, _filter=None, _sort=None, _dir=None, skip=None, _max=None, groupby=None, aggregate=None):
        """
        Performs a get request to the collection at the database. The queries can be specialized with the keyword arguments.
        Parameters:
        collection_name --- name of the collection to retrieve information from.
        Keyword Arguments: (visit https://restdb.io/media/restdb-cheat-sheet.pdf for more info)
        q --- specifies the search query.
        _filter --- filters the search results.
        _sort --- sorts the results according to a column value.
        _dir --- the direction of the sort. -1 for reverse. 1 by default.
        skip --- how many results to be skipped from the beginning.
        _max --- how many results to be returned at maximum.
        groupby --- grouping certain results according to the given column value.
        aggregate --- usually used with groupby. performs aggregation functions (sum, avg, min, max, count) on given columns of the result.
        """
        # None sentinel instead of a mutable default dict shared between calls.
        if q is None:
            q = {}
        url = CollectionAPI.TEMPLATE_URL_COLLECTION.format(self.dburl, collection_name)
        url += "?q=" + str(q)
        if _filter is not None:
            url += "&filter=" + _filter
        if _sort is not None:
            # A single column may be given as a string; several as an iterable.
            if isinstance(_sort, str):
                url += "&sort=" + _sort
            else:
                for sort_parameter in _sort:
                    url += "&sort=" + sort_parameter
        if _dir is not None:
            url += "&dir=" + str(_dir)
        if skip is not None:
            url += "&skip=" + str(skip)
        if _max is not None:
            url += "&max=" + str(_max)
        if groupby is not None:
            url += "&groupby=" + groupby
        if aggregate is not None:
            for aggregator in aggregate:
                url += "&aggregate=" + aggregator.upper() + ":" + aggregate[aggregator]
        # The dict repr uses single quotes; the API expects JSON-style doubles.
        url = url.replace("'", '"')
        return url
    @Decorators.url_to_get
    def get_record_from_collection(self, collection_name, record_id):
        """
        Performs a get request to the collection at the database with the specified ID.
        Parameters:
        collection_name --- name of the collection to retrieve information from.
        record_id --- ID of the record to be fetched.
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID.format(self.dburl, collection_name, record_id)
        return url
    @Decorators.url_to_get
    def get_records_from_subcollection(self, collection_name, record_id, subcollection_name):
        """
        Performs a get request to the subcollection under the record of the collection with the specified ID at the database.
        Parameters:
        collection_name --- name of the collection to retrieve information from.
        record_id --- ID of the record.
        subcollection_name --- name of the subcollection to retrieve information from.
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID_SUBCOLLECTION.format(self.dburl, collection_name, record_id, subcollection_name)
        return url
    @Decorators.url_to_get
    def get_record_from_subcollection(self, collection_name, record_id, subcollection_name, record_id2):
        """
        Performs a get request to the specified record in the subcollection under the record of the collection with the specified ID at the database.
        Parameters:
        collection_name --- name of the collection to retrieve information from.
        record_id --- ID of the record.
        subcollection_name --- name of the subcollection to retrieve information from.
        record_id2 --- ID of the record within the subcollection to be fetched.
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID_SUBCOLLECTION_ID.format(self.dburl, collection_name, record_id, subcollection_name, record_id2)
        return url
    @Decorators.url_to_post
    def post_new_record_on_collection(self, collection_name, new_record, validate=True):
        """
        Performs a post request to the collection at the database.
        Parameters:
        collection_name --- name of the collection to send information.
        new_record --- the new data to be sent to the database.
        Keyword Arguments:
        validate --- whether or not validation should be performed. set to False for slightly faster performance (not recommended at early stages).
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION.format(self.dburl, collection_name)
        if not validate:
            url += "?validate=false"
        return url
    @Decorators.url_to_post
    def post_new_record_on_subcollection(self, collection_name, record_id, subcollection_name, new_record, validate=True):
        """
        Performs a post request to the collection at the database.
        Parameters:
        collection_name --- name of the collection to send information.
        record_id --- the ID of the record.
        subcollection_name --- name of the subcollection to send information.
        new_record --- the new data to be sent to the database.
        Keyword Arguments:
        validate --- whether or not validation should be performed. set to False for slightly faster performance (not recommended at early stages).
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID_SUBCOLLECTION.format(self.dburl, collection_name, record_id, subcollection_name)
        if not validate:
            url += "?validate=false"
        return url
    @Decorators.url_to_put
    def put_existing_record_in_collection(self, collection_name, record_id, new_record):
        """
        Performs a put request to the collection at the database with the specified ID.
        Parameters:
        collection_name --- name of the collection to send information.
        record_id --- ID of the record to be modified.
        new_record --- the new record to be sent.
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID.format(self.dburl, collection_name, record_id)
        return url
    @Decorators.url_to_patch
    def patch_existing_record_in_collection(self, collection_name, record_id, new_record):
        """
        Performs a patch request to the collection at the database with the specified ID.
        Parameters:
        collection_name --- name of the collection to send information.
        record_id --- ID of the record to be updated.
        new_record --- the new record to be sent.
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID.format(self.dburl, collection_name, record_id)
        return url
    @Decorators.url_to_delete
    def delete_record_by_id(self, collection_name, record_id):
        """
        Performs a delete request to the collection at the database with the specified ID.
        Parameters:
        collection_name --- name of the collection to delete record from.
        record_id --- ID of the record to be deleted.
        """
        url = CollectionAPI.TEMPLATE_URL_COLLECTION_ID.format(self.dburl, collection_name, record_id)
        return url
    @Decorators.url_to_delete
    def delete_record_by_query(self, collection_name, q=None):
        """
        Performs a delete request to the collection at the database. Specialized with a query.
        Parameters:
        collection_name --- name of the collection to delete record from.
        q --- query dictionary for deletion. cannot be empty.
        """
        # None sentinel instead of a mutable default; None and {} are both rejected.
        if not q:
            raise ValueError("q cannot be empty")
        url = CollectionAPI.TEMPLATE_URL_COLLECTION.format(self.dburl, collection_name)
        url += "/*?q=" + str(q)
        url = url.replace("'", '"')
        return url
class ExceptionsMeta(type):
    """
    Metaclass for the exceptions declared in this module.

    All those exceptions are structurally identical (an Exception that just
    forwards a message), so the metaclass injects the common __init__ into
    every class it creates instead of repeating it in each class body.
    """
    def __new__(cls, name, bases=(), dct=None):
        # Immutable/None defaults: the previous tuple()/dict() defaults were
        # created once and shared across every call.
        new_class = super().__new__(cls, name, bases, dct if dct is not None else {})
        def __init__(self, message):
            Exception.__init__(self, message)
        # Inject the message-forwarding constructor into the created class.
        new_class.__init__ = __init__
        return new_class
def response_code_handler(response_code):
    """
    Maps the response code to the corresponding exception to be thrown. For some exceptions, I tried to make the message
    a bit more straightforward, with the possible reason why the exception may have occurred. For others, I used the generalized messages.
    Parameters:
    response_code --- the status code received from the HTTP request. Usually a Response object's status_code attribute.
    """
    # Dispatch table: status code -> (exception class, message). Unknown
    # codes fall through silently, exactly like the original if-chain.
    handlers = {
        400: (BadRequest, "May be because your request data format mismatched the format on database, or the record primary key already existed."),
        401: (Unauthorized, "Check your dburl and x-apikey."),
        403: (Forbidden, "The request was a legal request, but the server is refusing to respond to it."),
        404: (NotFound, "The server has not found anything matching your request data."),
        408: (RequestTimeout, "The server did not respond within the specified time."),
        409: (Conflict, "The request could not be completed due to a conflict with the current state of the resource."),
        500: (InternalServerError, "The server encountered an unexpected condition which prevented it from fulfilling the request."),
    }
    entry = handlers.get(response_code)
    if entry is not None:
        exception_class, message = entry
        raise exception_class(message)
class BadRequest(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 400 (Bad Request).
    """
    pass
class Unauthorized(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 401 (Unauthorized).
    """
    pass
class Forbidden(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 403 (Forbidden).
    """
    pass
class NotFound(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 404 (Not Found).
    """
    pass
class RequestTimeout(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 408 (Request Timeout).
    """
    pass
class Conflict(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 409 (Conflict).
    """
    pass
class InternalServerError(Exception, metaclass=ExceptionsMeta):
    """
    Intended to be thrown when HTTP response code is 500 (Internal Server
    Error) -- see response_code_handler; the previous docstring said 410.
    """
    pass
import functools

import requests

from .Exceptions import *
class Decorators:
    """
    All decorators used within the package are defined here.

    Each decorator takes a method that builds and returns a URL and converts
    it into a method that performs the corresponding HTTP request against
    that URL, raising a mapped exception on error status codes.
    """
    @staticmethod
    def url_to_get(method):
        """
        Decorates a method that returns a url, converting it into a get_request_performer.
        Parameters:
        method --- the method to be decorated.
        Returns:
        get_request_performer --- the modified function that performs the get request.
        """
        # functools.wraps copies __name__/__doc__ like the old manual code,
        # plus __module__, __qualname__ and __wrapped__.
        @functools.wraps(method)
        def get_request_performer(obj, *args, **kwargs):
            url = method(obj, *args, **kwargs)
            response = requests.get(url, headers=obj.header)
            response_code_handler(response.status_code)
            return response.json()
        return get_request_performer
    @staticmethod
    def url_to_post(method):
        """
        Decorates a method that returns a url, converting it into a post_request_performer.
        Parameters:
        method --- the method to be decorated.
        Returns:
        post_request_performer --- the modified function that performs the post request.
        """
        @functools.wraps(method)
        def post_request_performer(obj, *args, **kwargs):
            url = method(obj, *args, **kwargs)
            # By convention the payload is the last positional argument.
            post_data = args[-1]
            response = requests.post(url, post_data, headers=obj.header)
            response_code_handler(response.status_code)
        return post_request_performer
    @staticmethod
    def url_to_put(method):
        """
        Decorates a method that returns a url, converting it into a put_request_performer.
        Parameters:
        method --- the method to be decorated.
        Returns:
        put_request_performer --- the modified function that performs the put request.
        """
        @functools.wraps(method)
        def put_request_performer(obj, *args, **kwargs):
            url = method(obj, *args, **kwargs)
            # By convention the payload is the last positional argument.
            put_data = args[-1]
            response = requests.put(url, put_data, headers=obj.header)
            response_code_handler(response.status_code)
        return put_request_performer
    @staticmethod
    def url_to_patch(method):
        """
        Decorates a method that returns a url, converting it into a patch_request_performer.
        Parameters:
        method --- the method to be decorated.
        Returns:
        patch_request_performer --- the modified function that performs the patch request.
        """
        @functools.wraps(method)
        def patch_request_performer(obj, *args, **kwargs):
            url = method(obj, *args, **kwargs)
            # By convention the payload is the last positional argument.
            patch_data = args[-1]
            response = requests.patch(url, patch_data, headers=obj.header)
            response_code_handler(response.status_code)
        return patch_request_performer
    @staticmethod
    def url_to_delete(method):
        """
        Decorates a method that returns a url, converting it into a delete_request_performer.
        Parameters:
        method --- the method to be decorated.
        Returns:
        delete_request_performer --- the modified function that performs the delete request.
        """
        @functools.wraps(method)
        def delete_request_performer(obj, *args, **kwargs):
            url = method(obj, *args, **kwargs)
            response = requests.delete(url, headers=obj.header)
            response_code_handler(response.status_code)
        return delete_request_performer
# RestDoctor
BestDoctor's batteries for REST services.
## Для чего нужен RestDoctor
У нас в BestDoctor есть [свой API Guide](https://github.com/best-doctor/guides/blob/master/guides/api_guide.md), в котором написано, как API должно быть
построено. А еще у нас есть Django и довольно логично использовать Django Rest Framework. Он достаточно гибкий,
однако в некоторых местах мы хотим получить больше контроля и соблюдения своих правил.
Поэтому мы написали свою надстройку над DRF, которая имеет
1. Полную изоляцию между версиями API
1. Версионирование через заголовок `Accept`
1. Декларативную настройку сериализаторов и классов разрешений для `View` и `ViewSet`
1. Прокачанную генерацию схемы
## Быстрый старт
Добавляем пакет `restdoctor` в зависимости или ставим через pip, добавляем `restdoctor` в `INSTALLED_APPS`.
После этого можно использовать ViewSet'ы из restdoctor, заменив импорты `rest_framework` на
`restdoctor.rest_framework`.
Пример на основе tutorial DRF. Было:
```python
from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework import permissions
from tutorial.quickstart.serializers import UserSerializer, UserListSerializer
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
def get_serializer_class(self):
if self.action == 'list':
return UserListSerializer
return self.serializer_class
```
Стало:
```python
from django.contrib.auth.models import User
from restdoctor.rest_framework import viewsets
from rest_framework import permissions
from tutorial.quickstart.serializers import UserSerializer, UserListSerializer
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class_map = {
'default': UserSerializer,
'list': {
'response': UserListSerializer,
},
}
permission_classes_map = {
'default': [permissions.IsAuthenticated]
}
```
### Дальнейшая настройка
Для разбора формата из заголовка Accept необходимо добавить middleware в конфигурацию приложения:
```python
ROOT_URLCONF = ...
MIDDLEWARE = [
...,
'restdoctor.django.middleware.api_selector.ApiSelectorMiddleware',
]
API_PREFIXES = ('/api',)
API_FORMATS = ('full', 'compact')
```
После этого для префиксов, указанных в `API_PREFIXES`, будет производиться разбор заголовка Accept. Во время обработки
запроса во View или ViewSet в request добавится атрибут `api_params`.
## Установка и конфигурирование
Добавляем настройки в Settings:
```python
ROOT_URLCONF = 'app.urls'
INSTALLED_APPS = [
...,
'rest_framework',
'restdoctor',
]
MIDDLEWARE = [
...,
'restdoctor.django.middleware.api_selector.ApiSelectorMiddleware',
]
API_FALLBACK_VERSION = 'fallback'
API_FALLBACK_FOR_APPLICATION_JSON_ONLY = False
API_DEFAULT_VERSION = 'v1'
API_DEFAULT_FORMAT = 'full'
API_PREFIXES = ('/api',)
API_FORMATS = ('full', 'compact')
API_RESOURCE_DISCRIMINATIVE_PARAM = 'view_type'
API_RESOURCE_DEFAULT = 'common'
API_RESOURCE_SET_PARAM = False
API_RESOURCE_SET_PARAM_FOR_DEFAULT = False
API_V1_URLCONF = 'api.v1_urls'
API_VERSIONS = {
'fallback': ROOT_URLCONF,
'v1': API_V1_URLCONF,
}
```
## Использование в проекте
Максимально наследуемся от restdoctor там, где есть выбор между `rest_framework`
и `restdoctor.rest_framework`.
```python
from restdoctor.rest_framework.serializers import ModelSerializer
from restdoctor.rest_framework.views import GenericAPIView, RetrieveAPIView, ListAPIView
from restdoctor.rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
```
### Версионирование
RestDoctor маршрутизирует вызовы по заголовку `Accept` на изолированный `UrlConf`.
1. Во-первых, это означает, что без корректного заголовка `Accept` ручки API могут быть недоступны и отдавать 404.
1. А во-вторых, в приложении может быть несколько различных версий API, которые не будут "видеть" друг друга.
Общий формат заголовка следующий:
```
application/vnd.{vendor}.{version}[-{resource}][.{format}][+json]
```
Где vendor задается на уровне приложения параметром `API_VENDOR_STRING`, список версий и сопоставление их UrlConf'ам
определяется параметром `API_VERSIONS`.
Саму маршрутизацию для входящего запроса проводит middleware `ApiSelectorMiddleware`, которую надо включить в
настройках.
```python
ROOT_URLCONF = 'app.urls'
MIDDLEWARE = [
...,
'restdoctor.django.middleware.api_selector.ApiSelectorMiddleware',
]
API_V1_URLCONF = 'api.v1.urls'
API_VENDOR_STRING = 'RestDoctor'
API_FALLBACK_VERSION = 'fallback'
API_DEFAULT_VERSION = 'v1'
API_VERSIONS = {
API_FALLBACK_VERSION: ROOT_URLCONF,
API_DEFAULT_VERSION: API_V1_URLCONF,
}
```
Маршрутизация по `API_VERSIONS` срабатывает, если Accept начинается с `application/vnd.{vendor}`,
если не указана версия, то берется `API_DEFAULT_VERSION`. Если Accept не содержит корректной vendor-строки, то
выбирается `API_FALLBACK_VERSION`.
Версия может быть указана в формате `{version}-{resource}`, тогда `ResourceViewSet` будет использовать эту информацию
для выбора `ViewSet`.
Кроме того, может быть дополнительно указан `{format}` для выбора формата ответа, по факту выбор сериализатора в
`SerializerClassMapApiView`.
У формата тоже могут быть версии. Если `{format}` в `API_FORMATS` задан как `version:{2,3,5}`, то в заголовке Accept фигурирует только конкретный номер версии, например `version:5`.
Выбор сериализатора происходит от большей версии к меньшей.
В случае успешного определения версии и параметров API из заголовка Accept, middleware выбирает для дальнейшей обработки
запроса конкретный UrlConf и добавляет к объекту `request` атрибут `api_params`.
### Формат ответа API
Нашим API Guide задан [формат ответа](https://github.com/best-doctor/guides/blob/master/guides/api_guide.md#%D1%84%D0%BE%D1%80%D0%BC%D0%B0%D1%82-%D0%B7%D0%B0%D0%BF%D1%80%D0%BE%D1%81%D0%B0-%D0%B8-%D0%BE%D1%82%D0%B2%D0%B5%D1%82%D0%B0), за который отвечает RestDoctorRenderer
(`restdoctor.rest_framework.renderers.RestDoctorRenderer`). Включается он только для запросов, содержащих атрибут
`api_params`, и работает этот механизм через `content_negotiation_class` заданный в базовом для View и ViewSet
миксине NegotiatedMixin (`restdoctor.rest_framework.mixins.NegotiatedMixin`).
### SerializerClassMapApiView
DRF позволяет достаточно компактно определять `ModelSeraizlier` + `ModelViewSet`, однако оставляет достаточно много
свободы в одних местах, не предоставляя ее в других.
Например, можно переопределить `serializer_class` в классе ViewSet'а, либо определять его динамически через
`ViewSet.get_serializer_class`, однако нельзя переопределять сериализаторы отдельно для запроса, отдельно для ответа.
Т.е. нельзя задать отдельный сериализатор для `update`, используя сериализатор для `retrieve` для возврата измененной
сущности.
`SerializerClassMapApiView` дает возможность декларативно задавать сериализаторы для различных action, отдельно для
request и response.
Поддержка на уровне базовых миксинов для ViewSet'ов позволяет прозрачно заменить, например,
`ReadOnlyModelViewSet` в импортах с `rest_framework.viewsets` на `restdoctor.rest_framework.viewsets`.
#### serializer_class_map
`SerializerClassMapApiView` позволяет задавать сериализаторы для разных action'ов и форматов ответа отдельно для
request и response фазы обработки запроса.
```python
from restdoctor.rest_framework.viewsets import ModelViewSet
from app.api.serializers import (
    MyDefaultSerializer, MyCompactSerializer, MyAnotherSerializer,
MyCreateSerializer, MyUpdateSerializer,
)
class MyApiView(SerializerClassMapApiView):
"""Пример работы с serializer_class_map."""
serializer_class_map = {
'default': MyDefaultSerializer,
'default.compact': MyCompactSerializer,
'create': {
'request': MyCreateSerializer,
},
'update': {
'request': MyUpdateSerializer,
'request.version:3': MyVersion3UpdateSerializer,
'request.version:2': MyVersionUpdateSerializer,
},
'list': {
'response.another_format': MyAnotherSerializer,
'meta': MyMetaSerializer,
}
}
```
В этом примере мы задаем `MyDefaultSerializer` как базовый для ViewSet. Но для `create` и `update` action
переопределяем сериализаторы для обработки request'а.
Кроме того, мы определили сериализатор для `compact` формата и отдельно для action `list` и `update` форматы `another_format`, `version:2`, `version:3`.
Формат с версиями работает по принципу поиска точной или меньшей версии сериализатора.
Отдельно добавлено дополнительное формирование meta-информации.
#### permission_classes_map
По аналогии с `serializer_class_map` для декларативного задания разных наборов `permission_classes` на разных action'ах
можно определить `permission_classes_map`:
```python
from restdoctor.rest_framework.viewsets import ModelViewSet
from app.api.permissions import PermissionA, PermissionB
class MyViewSet(ModelViewSet):
permission_classes_map = {
'default': [PermissionA],
'retrieve': [PermissionB],
}
```
#### Замечание про action
В DRF action появляется во время регистрации `ViewSet` с помощью `Router`. При этом для разделения list/detail ресурсов
используются разные наборы `action_maps`:
```
list_action_map = {'get': 'list', 'post': 'create'}
detail_action_map = {'get': 'retrieve', 'put': 'update'}
```
Django-механизмы роутинга создают функцию-обработчик, которая инстанцирует View/ViewSet с нужными параметрами.
При этом один и тот же класс `ViewSet` будет присутствовать в UrlConf в двух экземплярах с разными `action_map`.
Во время обработки запроса по HTTP методу будет определен action и вызван соответствующий метод экземпляра `ViewSet`.
И во время обработки запроса у `ViewSet` всегда задан `self.action`.
Однако это не так для `View`, поэтому в `SerializerClassMapApiView` добавлен атрибут `action`, на который завязывается
поиск сериализатора в `serializer_class_map`.
### Миксины и ModelViewSet
Миксины задают базовые операции `ModelViewSet` для `'list'`, `'retrieve'`, `'create'`, `'update'`, `'destroy'` action'ов.
От DRF-версий они отличаются в основном тем, что используют `SerializerClassMapApiView.get_request_serializer` и
`SerializerClassMapApiView.get_response_serializer` вместо `View.get_serializer`.
#### RetrieveModelMixin
Определяет обработчик для `retrieve` action. Определяет метод `get_item`:
```python
class RetrieveModelMixin(BaseListModelMixin):
def retrieve(self, request: Request, *args: typing.Any, **kwargs: typing.Any) -> Response:
item = self.get_item(request_serializer)
...
def get_item(self, request_serializer: BaseSerializer) -> typing.Union[typing.Dict, QuerySet]:
return self.get_object()
```
Т.е. можно использовать `RetrieveModelMixin` для работы с любыми словарями, а не только моделями, надо только
переопределить `ViewSet.get_item`.
#### ListModelMixin
Определяет обработчик для `list` action. Определяет метод `get_collection`:
```python
class ListModelMixin(BaseListModelMixin):
def list(self, request: Request, *args: typing.Any, **kwargs: typing.Any) -> Response:
queryset = self.get_collection()
...
def get_collection(self, request_serializer: BaseSerializer) -> typing.Union[typing.List, QuerySet]:
return self.filter_queryset(self.get_queryset())
```
Т.е. можно использовать `ListModelMixin` для работы с любыми коллекциями, а не только моделями, надо только
переопределить `ViewSet.get_collection`. При этом, если задан сериализатор для `list`, то он будет использован
для query-параметров, что позволит получить эти параметры и использовать дополнительно к filterset'у.
Определяет формирование дополнительной `meta` информации. Определяет метод `get_meta_data`:
```python
class ListModelMixin(BaseListModelMixin):
def get_meta_data(self) -> typing.Dict[str, typing.Any]:
return {'test': typing.Any}
```
Т.е. можно использовать `ListModelMixin` для формирования дополнительной информации в поле `meta`.
Для корректной работы нужно определить сериализатор для `meta`.
```python
serializer_class_map = {
'default': MyDefaultSerializer,
'list': {
'meta': MyMetaSerializer,
}
}
```
Задан обработчик `perform_list` для выбранных данных в пагинации.
Для работы нужно переопределить метод `perform_list`.
```python
class ListModelMixin(BaseListModelMixin):
def perform_list(self, data: typing.Union[typing.List, QuerySet]) -> None:
Sender(data)
```
#### ListModelViewSet
Задан только обработчик для `list` action.
#### ReadOnlyModelViewSet
Заданы обработчики для `list` и `retrieve` action'ов.
#### CreateUpdateReadModelViewSet
Заданы обработчики для `list`, `retrieve`, `create`, `update` action'ов.
#### ModelViewSet
Полный набор action'ов: `list`, `retrieve`, `create`, `update`, `destroy`.
### PydanticSerializer
Для использования сериализатора на основе [pydantic](https://pydantic-docs.helpmanual.io/) необходимо наследовать
сериализатор от `PydanticSerializer`, указать в `Meta` `pydantic_model` и `pydantic_use_aliases` (при необходимости).
Параметр `pydantic_use_aliases` позволяет использовать [алиасы pydantic моделей](https://pydantic-docs.helpmanual.io/usage/model_config/#alias-precedence) для сериализации.
```python
class MyPydanticSerializer(PydanticSerializer):
class Meta:
pydantic_model = PydanticModel
pydantic_use_aliases = True
```
### Генерация схемы
Поддерживается генерация схемы openapi версий 3.0.2 и 3.1.0.
Схема по умолчанию задается параметром `API_DEFAULT_OPENAPI_VERSION` и равна `3.0.2`.
Пример генерации схемы (версия из settings):
```shell
python3 ./manage.py generateschema --urlconf api.v1.urls --generator_class restdoctor.rest_framework.schema.RefsSchemaGenerator > your_app/static/openapi.schema
```
Пример генерации схемы версии openapi 3.0.2:
```shell
python3 ./manage.py generateschema --urlconf api.v1.urls --generator_class restdoctor.rest_framework.schema.RefsSchemaGenerator30 > your_app/static/openapi-30.schema
```
Пример генерации схемы версии openapi 3.1.0:
```shell
python3 ./manage.py generateschema --urlconf api.v1.urls --generator_class restdoctor.rest_framework.schema.RefsSchemaGenerator31 > your_app/static/openapi-31.schema
```
#### Опции генерации
##### API_STRICT_SCHEMA_VALIDATION
- делает обязательным использование описаний у полей (`help_text`, `verbose_name` у модели)
- проверяет на совпадение аннотацию поля и атрибут `allow_null`
- проверяет на совпадение аннотацию поля и атрибут `many`
Если какая-то проверка не проходит, генерация схемы завершается ошибкой.
##### API_SCHEMA_PRIORITIZE_SERIALIZER_PARAMETERS
При включении этой опции для схемы будут выбираться поля сериализатора, даже если они дублируют существующие.
##### API_SCHEMA_FILTER_MAP_PATH
Путь до кастомной схемы обработки фильтров для `DjangoFilterBackend`, по умолчанию - `restdoctor.rest_framework.schema.filters.FILTER_MAP`.
### pre-commit
Этот репозиторий использует git-хуки настроенные с помощью [pre-commit](https://pre-commit.com)
поэтому если планируется дальнейшее внесение изменений в репозиторий необходимо инициализировать
pre-commit с помощью следующей команды:
```shell script
make install-hooks
```
| /restdoctor-0.0.61.tar.gz/restdoctor-0.0.61/README.md | 0.556882 | 0.837819 | README.md | pypi |
from django.db.models.fields import BLANK_CHOICE_DASH
from django.utils.translation import ugettext as _
from rest_framework import serializers as rf_serializers
from rest_framework import fields as rf_fields
from copy import copy
import warnings
class CheckRequiredField(rf_fields.BooleanField):
    """Checkbox-rendered boolean field that must be ticked.

    Rendered with the ``checkbox.html`` base template; an empty, missing or
    False submitted value raises the standard ``required`` error.
    """

    def __init__(self, *args, **kwargs):
        # Force the checkbox template regardless of any caller-supplied style.
        kwargs["style"] = {'base_template': 'checkbox.html'}
        super().__init__(*args, **kwargs)

    def to_internal_value(self, data):
        value = super().to_internal_value(data)
        if value == "" or value is None or value is False:
            self.fail('required')
        return value
class MultipleCheckboxField(rf_fields.MultipleChoiceField):
    """Multiple-choice field rendered as a group of checkboxes.

    :param required: when True, at least one box must be ticked; the check is
        performed manually in :meth:`to_internal_value`.
    :param inline: render the checkboxes on a single line.
    """

    def __init__(self, *args, required=False, inline=False, **kwargs):
        # Bug fix: the original stored this under the misspelled name
        # ``self.requred``. It is kept under a private name (not
        # ``self.required``) because the base Field.__init__ assigns
        # ``self.required`` itself; ``required`` is deliberately kept out of
        # **kwargs so DRF's built-in required-validation stays disabled.
        self._required = required
        kwargs["style"] = {
            'base_template': 'checkbox_multiple.html',
            'inline': inline,
        }
        super().__init__(*args, **kwargs)

    def to_internal_value(self, data):
        data = super().to_internal_value(data)
        # Idiom: truthiness instead of ``not len(data)``.
        if self._required and not data:
            self.fail('required')
        return data
class ChoiceWithBlankField(rf_fields.ChoiceField):
    """Choice field that prepends a blank placeholder option and rejects it.

    Deprecated in favour of ``ChoiceRequiredField``; kept for backward
    compatibility.
    """
    def __init__(self, choices, *args, blank_label=None, **kwargs):
        warnings.warn(
            "ChoiceWithBlankField' will be deprecated in the future. "
            "Please use to 'RequireChoiceField'.",
            PendingDeprecationWarning
        )
        # Default label comes from Django's BLANK_CHOICE_DASH ("---------").
        blank_choices = copy(BLANK_CHOICE_DASH)
        blank_label = blank_label or blank_choices[0][1]
        # NOTE(review): blank_label is always truthy at this point (the dash
        # fallback is a non-empty string), so this branch always executes.
        if blank_label:
            blank_choices = [["", blank_label],]
        # The blank option is injected as the first choice.
        choices = tuple(blank_choices + list(choices))
        super().__init__(choices, *args, **kwargs)

    def to_internal_value(self, data):
        data = super().to_internal_value(data)
        # Selecting the injected blank option counts as "no answer".
        if data == "" or data is None:
            self.fail('required')
        return data
class ChoiceRequiredField(rf_fields.ChoiceField):
    """Choice field whose first choice must be a blank/None placeholder.

    The first choice acts as the "not selected" marker; submitting it (or an
    empty value) raises the standard ``required`` error.

    :raises ValueError: at construction time when the first choice value is
        neither ``None`` nor an empty string.
    """

    def __init__(self, choices, *args, **kwargs):
        # Fixed the misspelled ``**kwarsg`` and moved validation before the
        # super() call so a bad declaration fails fast.
        first_value = choices[0][0]
        # Bug fix: the original accepted only None even though its error
        # message promises "blank or None"; accept "" as well.
        if first_value not in (None, ""):
            raise ValueError(
                "first choice value must be blank or None. "
                "({}, {})".format(choices[0][0], choices[0][1]))
        super().__init__(choices, *args, **kwargs)

    def to_internal_value(self, data):
        data = super().to_internal_value(data)
        if data == "" or data is None or data is False:
            self.fail('required')
        return data
class RadioField(rf_fields.ChoiceField):
    """Choice field rendered as a group of radio buttons.

    :param inline: render the radio buttons on a single line.
    """

    def __init__(self, *args, inline=False, **kwargs):
        # Force the radio template regardless of any caller-supplied style.
        kwargs["style"] = {'base_template': 'radio.html', 'inline': inline}
        super().__init__(*args, **kwargs)
class TextField(rf_fields.CharField):
    """Char field rendered as a multi-line ``<textarea>``.

    Deprecated; kept for backward compatibility.

    :param rows: number of visible text rows.
    :param placeholder: placeholder text; a ``placeholder`` key already
        present in a caller-supplied ``style`` dict takes precedence.
    """

    def __init__(self, *args, rows=5, placeholder="", **kwargs):
        warnings.warn(
            "TextField will be deprecated in the future.",
            PendingDeprecationWarning
        )
        style = kwargs.get("style", dict())
        # Same precedence as the original, without the redundant second
        # kwargs lookup: a placeholder in the caller's style wins.
        style.update({
            'base_template': 'textarea.html',
            'rows': rows,
            'placeholder': style.get("placeholder", placeholder),
        })
        kwargs["style"] = style
        super().__init__(*args, **kwargs)
from rdflib import URIRef
# Shared message for the abstract-method stubs on RDFMigration below.
MIGRATION_OVERRIDE_MESSAGE = 'Override this method to return an rdflib.Graph.'
def on_add(uri):
    """
    Decorator for RDFMigration class methods.

    Marks the decorated method as the handler that runs when the subject
    `uri` is introduced. A method may carry at most one addition mark
    (it may additionally carry a removal mark), and each URI's addition
    may be handled by only one method.
    """
    def decorator(method):
        method.__handles_addition__ = URIRef(uri)
        return method

    return decorator
def on_remove(uri):
    """
    Decorator for RDFMigration class methods.

    Marks the decorated method as the handler that runs when the subject
    `uri` disappears. A method may carry at most one removal mark (it may
    additionally carry an addition mark), and each URI's removal may be
    handled by only one method.
    """
    def decorator(method):
        method.__handles_removal__ = URIRef(uri)
        return method

    return decorator
def on_present(uri):
    """
    Decorator for RDFMigration class methods.

    Marks the decorated method as the handler for the presence of the
    predicate `uri` in the actual graph. A method may carry at most one
    presence mark.
    """
    def decorator(method):
        method.__handles_presence__ = URIRef(uri)
        return method

    return decorator
class MetaRDFMigration(type):
    """
    Metaclass that collects decorated handler methods into lookup tables.

    Scans the class namespace for methods marked by @on_add, @on_remove and
    @on_present and stores {uri: method_name} mappings on the class as
    .add_handlers, .remove_handlers and .presence_handlers respectively.
    This should be considered an implementation detail of RDFMigration.
    """
    def __new__(cls, name, bases, namespace, **kwargs):
        new_class = super().__new__(cls, name, bases, namespace, **kwargs)
        # (marker attribute set by the decorator, handler table to fill)
        markers = (
            ('__handles_addition__', 'add_handlers'),
            ('__handles_removal__', 'remove_handlers'),
            ('__handles_presence__', 'presence_handlers'),
        )
        tables = {table: {} for _, table in markers}
        for attribute_name, member in new_class.__dict__.items():
            for marker, table in markers:
                uri = getattr(member, marker, None)
                if uri:
                    tables[table][uri] = attribute_name
        for table, mapping in tables.items():
            setattr(new_class, table, mapping)
        return new_class
class RDFMigration(metaclass=MetaRDFMigration):
    """
    Base class for RDF migrations.
    At a minimum, every derived class should override the .actual and
    .desired member functions, both of which must return an
    rdflib.Graph. If you are assigning free functions to these
    members, for example imported from another module, make sure to
    wrap them with staticmethod().
    By default, .actual() will be updated to match .desired()
    exactly. If you need to do extra work, define additional methods
    that are decorated with @on_add or @on_remove (documented above).
    These methods should take three arguments, `self`, `actual` and
    `conjunctive`, where `actual` is the result of `self.actual()`
    and `conjunctive` is the full conjunctive graph from the Django
    store. The return value is ignored; migration methods should take
    effect by directly modifying either `actual` or `conjunctive`.
    Migrations are executed in the following order:
    1. Add new triples from the desired graph to the actual graph.
    2. Execute addition handler methods (relative order undefined).
    3. Execute removal handler methods (relative order undefined).
    4. Remove triples from the actual that disappeared in the desired.
    A migration class is instantiated before application, so in
    principle, you can make a migration stateful. However, keep in
    mind that the relative order of addition handlers and removal
    handlers is undefined; for this reason, it is safer to keep the
    migration stateless if possible.
    If you define an __init__ method, give it a (self, *args, **kwargs)
    signature in order to make it forward-compatible. Currently, no
    arguments are passed, but this might change in the future.
    """
    def actual(self):
        """Return the graph as currently stored (override; rdflib.Graph)."""
        raise NotImplementedError(MIGRATION_OVERRIDE_MESSAGE)

    def desired(self):
        """Return the graph as it should become (override; rdflib.Graph)."""
        raise NotImplementedError(MIGRATION_OVERRIDE_MESSAGE)
import random
import re
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from django.conf import settings
from rdflib import ConjunctiveGraph, Graph, Literal, URIRef
from rdflib.plugins.stores.sparqlstore import SPARQLStore
from rdflib.plugins.stores.sparqlconnector import (
SPARQLConnector, SPARQLConnectorException, _response_mime_types)
from typing import Optional
from .ns import OA, XSD, DCTERMS
# Matches SPARQL "PREFIX ns: <iri>" declarations; group 1 captures the prefix name.
PREFIX_PATTERN = re.compile(r'PREFIX\s+(\w+):\s*<\S+>', re.IGNORECASE)
def get_conjunctive_graph():
    """ Returns the conjunctive graph of our SPARQL store. """
    # A ConjunctiveGraph spans every named graph in the Django-configured
    # rdflib store, so queries through it see all contexts at once.
    return ConjunctiveGraph(settings.RDFLIB_STORE)
def prune_triples(graph, triples):
    """Delete every triple in iterable `triples` from `graph` in place."""
    for spo in triples:
        graph.remove(spo)
def prune_triples_cascade(graph, triples, graphs_applied_to=[], privileged_predicates=[]):
    """
    Recursively remove the subjects of `triples` and all related resources from `graph`.

    Only triples inside the graphs listed in `graphs_applied_to` are deleted;
    resources related via predicates in `privileged_predicates` are kept.
    """
    for triple in triples:
        subject = triple[0]
        prune_recursively(graph, subject, graphs_applied_to, privileged_predicates)
def prune_recursively(graph, subject, graphs_applied_to=[], privileged_predicates=[]):
    """
    Recursively remove subject and all related resources from `graph`.
    Specify which graphs qualify, i.e. from which triples will be deleted, in `graphs_applied_to`.
    Optionally, skip deletion of (i.e. keep) items related via specific (privileged) predicates.
    """
    # Quads (s, p, o, context) that have `subject` in subject position.
    related_by_subject = list(graph.quads((subject, None, None)))
    for s, p, o, c in related_by_subject:
        # Descend only into URI objects (literals cannot be subjects), avoid
        # self-loops, skip privileged predicates, and stay inside the graphs
        # this pruning is allowed to touch.
        if isinstance(o, URIRef) and o != s and p not in privileged_predicates and c in graphs_applied_to:
            prune_recursively(graph, o, graphs_applied_to,
                              privileged_predicates)
    # Finally drop the subject's own triples (children were removed above).
    prune_triples(graph, related_by_subject)
def append_triples(graph, triples):
    """Insert every triple from iterable `triples` into `graph` in place."""
    for spo in triples:
        graph.add(spo)
def graph_from_triples(triples, ctor=Graph):
    """ Return a new graph, built via `ctor`, containing all items in iterable `triples`. """
    fresh = ctor()
    append_triples(fresh, triples)
    return fresh
def sample_graph(graph, subjects, request):
    """Return a Graph holding all triples of up to `n_results` random subjects.

    :param graph: source graph the sampled triples are copied from
    :param subjects: candidate subject nodes
    :param request: HTTP request; its `n_results` query parameter caps the
        sample size (assumed present and numeric — TODO confirm callers
        always supply it, otherwise int(None) raises TypeError)
    """
    n_results = int(request.GET.get('n_results'))
    if len(subjects) > n_results:
        sampled_subjects = random.sample(list(subjects), n_results)
    else:
        sampled_subjects = subjects
    output = Graph()
    for subject in sampled_subjects:
        # Idiom fix: plain loop instead of the original side-effect-only
        # list comprehension.
        for triple in graph.triples((subject, None, None)):
            output.add(triple)
    return output
def traverse_forward(full_graph, fringe, plys):
    """
    Traverse `full_graph` by object `plys` times, starting from `fringe`.
    Returns a graph with all triples accumulated during the traversal,
    excluding `fringe`.
    """
    result = Graph()
    visited_objects = set()
    while plys > 0:
        # Objects reachable from the current fringe that were not expanded yet.
        objects = set(fringe.objects()) - visited_objects
        if not len(objects):
            break
        fringe = Graph()
        for o in objects:
            # Literals cannot appear in subject position, so only
            # URI/blank-node objects are expanded further.
            if not isinstance(o, Literal):
                append_triples(fringe, full_graph.triples((o, None, None)))
        result |= fringe
        visited_objects |= objects
        plys -= 1
    return result
def traverse_backward(full_graph, fringe, plys):
    """
    Traverse `full_graph` by subject `plys` times, starting from `fringe`.
    Returns a graph with all triples accumulated during the traversal,
    excluding `fringe`. This result always contains complete
    resources, i.e., all triples of each subject in the graph are
    included.
    """
    result = Graph()
    subjects = set(fringe.subjects())
    visited_subjects = set()
    while plys > 0:
        if not len(subjects):
            break
        fringe = Graph()
        fringe_subjects = set()
        for s in subjects:
            # Resources that point *to* s (reverse edges).
            parents = set(full_graph.subjects(None, s))
            # Copy each newly-found parent's full set of triples exactly once.
            for ss in parents - fringe_subjects:
                append_triples(fringe, full_graph.triples((ss, None, None)))
            fringe_subjects |= parents
        result |= fringe
        visited_subjects |= subjects
        # Next ply starts from the parents found in this round.
        subjects = set(fringe.subjects()) - visited_subjects
        plys -= 1
    return result
def latin1_to_utf8(original: str) -> str:
    """Undo a latin-1 mis-decoding: re-encode as latin-1, decode as UTF-8.

    Returns `original` unchanged when either step fails, i.e. when the text
    was not mojibake produced by reading UTF-8 bytes as latin-1.
    """
    try:
        raw = original.encode('latin-1')
        return raw.decode()
    except (UnicodeDecodeError, UnicodeEncodeError):
        return original
def find_latin1_triples(graph: Graph) -> Graph:
    """Return triples from `graph` whose string objects look like latin-1 mojibake.

    Only triples of subjects created after 2022-05-10 are inspected; the
    regex matches any character in the 0x80-0xFF range, which is typical of
    UTF-8 text mis-decoded as latin-1.
    """
    # Doubled braces survive str.format; only the graph IRI is interpolated.
    query = r'''CONSTRUCT {{?s ?p ?o }}
    WHERE {{
    GRAPH <{}> {{
    ?s ?p ?o;
    dcterms:created ?date .
    FILTER(?date > "2022-05-10T00:00:00.000000+00:00"^^xsd:dateTime)
    FILTER(datatype(?o)=xsd:string)
    FILTER(regex(?o, "[\\x80-\\xFF]"))
    }}
    }}
    '''.format(graph.identifier)
    res = graph.query(query, initNs={'xsd': XSD, 'dcterms': DCTERMS})
    g = graph_from_triples(res)
    print(f'found {len(g)} latin-1 triples in graph {graph.identifier}')
    return g
def find_latin1_preannos(graph: Graph, source_graph: Graph) -> Graph:
    """Return selector triples of annotations whose strings look like latin-1 mojibake.

    Follows oa:hasTarget/oa:hasSelector from annotations in `graph` and keeps
    selector triples with suspicious 0x80-0xFF characters; `source_graph`
    restricts the search to sources created after 2022-05-10.
    """
    # Doubled braces survive str.format; the two graph IRIs are interpolated.
    query = r'''CONSTRUCT {{?selector ?pred ?obj}}
    WHERE {{
    GRAPH <{}> {{
    ?s oa:hasTarget ?target .
    ?target oa:hasSource ?source ;
    oa:hasSelector ?selector .
    ?selector ?pred ?obj
    FILTER(datatype(?obj)=xsd:string)
    FILTER(regex(?obj, "[\\x80-\\xFF]"))
    }}
    GRAPH <{}> {{
    ?source dcterms:created ?date .
    FILTER(?date > "2022-05-10T00:00:00.000000+00:00"^^xsd:dateTime)
    }}
    }}
    '''.format(graph.identifier, source_graph.identifier)
    res = graph.query(query, initNs={'xsd': XSD, 'dcterms': DCTERMS, 'oa': OA})
    g = graph_from_triples(res)
    print(f'found {len(g)} latin-1 triples in graph {graph.identifier}')
    return g
def recode_latin1_triples(g: Graph, latin1_triples: Graph, commit=False) -> None:
    '''Re-encode latin-1 mojibake string objects to UTF-8.

    With commit=False the before/after strings are only printed for manual
    inspection; with commit=True the triples are replaced in the triplestore.
    '''
    cnt = 0
    for subj, pred, obj in latin1_triples:
        fixed = latin1_to_utf8(obj)
        if obj == fixed:
            continue
        cnt += 1
        if commit:
            g.add((subj, pred, Literal(fixed)))
            g.remove((subj, pred, obj))
        else:
            # manual sanity check
            print(obj)
            print(fixed)
            print('---')
    print(f'updated {cnt} triples')
def patched_inject_prefixes(self, query, extra_bindings):
    ''' Monkeypatch for SPARQLStore prefix injection.

    Parses the incoming query for PREFIX declarations and skips those when
    injecting the store's namespace bindings, so no prefix is declared twice.
    Bindings supplied via `extra_bindings` (initNs) take precedence: any
    clashing PREFIX line is stripped from the query first.
    Better implementation is possibly available,
    e.g. use rdflib's query parser to extract prefixes.
    '''
    query_prefixes = re.findall(PREFIX_PATTERN, query)
    # prefixes available in the query should be deducted from the store's nsBindings
    # prefixes that were provided through initNs should take precedence over all others
    bindings = {x for x in set(self.nsBindings.items())
                if x[0] not in query_prefixes}
    bindings |= set(extra_bindings.items())
    # remove the extra bindings from the original query
    for k in set(extra_bindings.keys()):
        if k in query_prefixes:
            replace_pattern = re.compile(
                fr'PREFIX\s+{k}:\s*<.+>', re.IGNORECASE)
            query = re.sub(replace_pattern, '', query)
    if not bindings:
        return query
    # NOTE(review): `bindings` is a set, so PREFIX lines are emitted in an
    # unspecified order; harmless for SPARQL but non-deterministic.
    return "\n".join(
        [
            "\n".join(["PREFIX %s: <%s>" % (k, v) for k, v in bindings]),
            "",  # separate ns_bindings from query with an empty line
            query,
        ]
    )
def patched_sparqlconnector_update(self, query,
                                   default_graph: Optional[str] = None,
                                   named_graph: Optional[str] = None):
    '''Monkeypatch for SPARQLConnector's update method.

    Changes the Content-Type header to include the utf-8 charset, and closes
    the HTTP response instead of leaking the underlying connection.

    :raises SPARQLConnectorException: when no update endpoint is configured.
    '''
    if not self.update_endpoint:
        raise SPARQLConnectorException("Query endpoint not set!")
    params = {}
    if default_graph is not None:
        params["using-graph-uri"] = default_graph
    if named_graph is not None:
        params["using-named-graph-uri"] = named_graph
    # Single difference from the original rdflib method: the Content-Type
    # header declares the utf-8 charset explicitly.
    headers = {
        "Accept": _response_mime_types[self.returnFormat],
        "Content-Type": "application/sparql-update; charset=utf-8",
    }
    args = dict(self.kwargs)  # other QSAs
    # NOTE(review): dict(self.kwargs) is a shallow copy, so updating a nested
    # "params"/"headers" dict mutates the shared one — same as the original.
    args.setdefault("params", {})
    args["params"].update(params)
    args.setdefault("headers", {})
    args["headers"].update(headers)
    qsa = "?" + urlencode(args["params"])
    request = Request(self.update_endpoint + qsa, data=query.encode(),
                      headers=args["headers"])
    # Bug fix: the original discarded the response object without closing it,
    # leaking the connection; the context manager closes it deterministically.
    with urlopen(request):
        pass
# Apply monkeypatches
# Import-time side effect: every SPARQLStore/SPARQLConnector instance in the
# process uses the patched implementations defined above.
SPARQLStore._inject_prefixes = patched_inject_prefixes
SPARQLConnector.update = patched_sparqlconnector_update
import inspect
import json
from collections.abc import Callable
from typing import Any, Dict, Optional
from restful_aws_lambda.request import Request
class Route:  # pylint: disable=C0103,R0903
    """
    Lambda handler decorator core class.

    Wraps a user handler so it may declare only the arguments it needs
    (``event``, ``context``, ``request`` or any path parameter) and return
    ``code``, ``(code, body)`` or ``(code, body, headers)``.
    """

    def __init__(self, lambda_handler: Callable, json_dumps_options: dict):
        self._handler: Callable = lambda_handler
        self._json_dumps_options: dict = json_dumps_options

    def restful(self) -> Callable:
        """Build and return a restful lambda_handler."""
        signature = inspect.signature(self._handler, follow_wrapped=True)
        handler_args = list(signature.parameters)

        def inner_func(event, context) -> dict:
            func_args_values: dict = {}
            request: Request = Request(event)
            for arg in handler_args:
                # Path parameters take precedence over the reserved names,
                # matching the original resolution order.
                if arg in request.path_params:
                    func_args_values[arg] = request.path_params.get(arg)
                elif arg == "event":
                    func_args_values["event"] = event
                elif arg == "context":
                    func_args_values["context"] = context
                elif arg == "request":
                    func_args_values["request"] = request
                else:
                    raise TypeError(
                        f"handler got an unexpected argument '{arg}'"
                    )
            response = self._handler(**func_args_values)
            if not isinstance(response, tuple):
                response = (response,)
            return self._format_response(*response)

        return inner_func

    def _format_response(
        self, code: int, body: Any = None, headers: Optional[dict] = None
    ) -> Dict[str, Any]:
        """
        Format the handler's response to the expected Lambda response format.

        :raises TypeError: when ``code`` is not an int, or ``headers`` is
            neither None nor a dict.
        """
        if not isinstance(code, int):
            raise TypeError(f"Invalid status code. {type(code)} is not int.")
        # Idiom fix: isinstance instead of comparing type() against a list;
        # this also accepts dict subclasses (e.g. OrderedDict).
        if headers is not None and not isinstance(headers, dict):
            raise TypeError(
                f"Invalid headers. {type(headers)} is not in [NoneType, dict]."
            )
        response: Dict[str, Any] = {"statusCode": code}
        if body is not None:
            response["body"] = json.dumps(body, **self._json_dumps_options)
        if headers is not None:
            response["headers"] = headers
        return response
import json
from typing import Optional
class Request:
    """
    Parsed view over an API Gateway event.

    Instances are created in @route handlers and handed to the handler as
    the "request" argument; they expose the event's body, parameters,
    method and headers as properties.

    Parameters
    ----------
    event : dict
        The API Gateway event.
    """

    def __init__(self, event: dict):
        if not isinstance(event, dict):
            raise TypeError(f"Invalid event. {type(event)} is not dict.")
        self.event = event

    @property
    def body(self) -> Optional[str]:
        """
        Raw ``body`` field of the event, or None when absent.

        Examples
        --------
        >>> Request({"body": '{"name": "John Doe"}'}).body
        '{"name": "John Doe"}'
        """
        return self.event.get("body")

    @property
    def json(self) -> Optional[dict]:
        """
        Body parsed as JSON, or None when there is no body.

        Examples
        --------
        >>> Request({"body": '{"name": "John Doe"}'}).json["name"]
        'John Doe'
        """
        raw = self.body
        return None if raw is None else json.loads(raw)

    @property
    def path_params(self) -> dict:
        """
        ``pathParameters`` field, with None normalized to an empty dict.

        Examples
        --------
        >>> Request({"pathParameters": {"user_id": 123}}).path_params
        {'user_id': 123}
        """
        return self._event_dict_field("pathParameters")

    @property
    def query_params(self) -> dict:
        """
        ``queryStringParameters`` field, with None normalized to an empty dict.

        Examples
        --------
        >>> Request({"queryStringParameters": {"page": "3"}}).query_params
        {'page': '3'}
        """
        return self._event_dict_field("queryStringParameters")

    @property
    def method(self) -> str:
        """
        HTTP method of the request.

        Examples
        --------
        >>> Request({"httpMethod": "GET"}).method
        'GET'
        """
        return self.event["httpMethod"]

    @property
    def headers(self) -> dict:
        """
        Request headers.

        Examples
        --------
        >>> Request({"headers": {"accept": "*/*"}}).headers
        {'accept': '*/*'}
        """
        return self.event["headers"]

    def _event_dict_field(self, key: str) -> dict:
        """Return ``event[key]`` with a None value mapped to an empty dict."""
        value = self.event.get(key)
        return {} if value is None else value
from typing import Dict, List, Callable, Union
from urllib.parse import urljoin
import requests
import json
from functools import wraps
class NoneEtag(IOError):
    """Raised when the API does not provide the etag required for a write."""
    pass
class APIClient(object):
    """
    A dmicros api connection
    ====
    Provides api functions: thin wrappers around ``requests`` that add the
    Authorization header, resolve relative paths against ``api_root``,
    JSON-encode bodies and handle etag-protected writes (PATCH/DELETE).
    """

    def __init__(self, api_root: str, auth: Dict[str, str]) -> None:
        """
        :param api_root: base URL every relative path is joined against
        :param auth: credentials dict; the "token" entry is sent as the
            Authorization header
        """
        self.api_root = api_root
        self.auth = auth
        # apply wrappers without decorator sugar
        self.get = self.auth_headers(self.abs_url(self.get_inner))
        self.post = self.encode_data(self.auth_headers(self.abs_url(self.post_inner)))
        self.method_with_etag = self.encode_data(
            self.auth_headers(self.abs_url(self.method_with_etag_inner))
        )

    def get_authorization_header(self) -> str:
        """
        get authorization header from property
        """
        return self.auth.get("token", "")

    def auth_headers(self, f: Callable) -> Callable:
        """Wrap `f` so every call carries the Authorization header."""
        @wraps(f)
        def wrapper(*args, **kwargs):
            headers = kwargs.get("headers", {}).copy()
            headers.update({"Authorization": self.get_authorization_header()})
            kwargs["headers"] = headers
            return f(*args, **kwargs)

        return wrapper

    def abs_url(self, f: Callable) -> Callable:
        """Wrap `f` so its first positional argument is resolved against api_root."""
        @wraps(f)
        def wrapper(url, *args, **kwargs):
            return f(urljoin(self.api_root, url), *args, **kwargs)

        return wrapper

    def encode_data(self, f: Callable) -> Callable:
        """Wrap `f` so its `data` kwarg is JSON-encoded with a JSON Content-Type."""
        @wraps(f)
        def wrapper(*args, **kwargs):
            kwargs["data"] = json.dumps(kwargs.get("data", {}))
            headers = kwargs.get("headers", {}).copy()
            headers.update({"Content-Type": "application/json; charset=utf-8"})
            kwargs["headers"] = headers
            return f(*args, **kwargs)

        return wrapper

    def get_inner(self, url: str, headers: Union[Dict, None] = None) -> requests.Response:
        """method GET (undecorated)."""
        return requests.get(url, headers=headers)

    def post_inner(
        self, url: str, data: Union[List, Dict, str, None] = None,
        headers: Union[Dict, None] = None
    ) -> requests.Response:
        """method POST (undecorated; `data` normally arrives pre-encoded as JSON)."""
        return requests.post(url, data=data, headers=headers)

    def method_with_etag_inner(
        self, url: str, etag: str, data: Union[List, Dict, str, None],
        headers: Dict, method: str
    ) -> requests.Response:
        """Issue `method` with an If-Match etag header (undecorated)."""
        headers["If-Match"] = etag
        # Idiom fix: getattr instead of requests.__getattribute__.
        return getattr(requests, method)(url, data=data, headers=headers)

    def method_auto_etag(
        self, url: str, data: Union[List, Dict], headers: Dict, method: str
    ) -> requests.Response:
        """Fetch the resource's current etag, then issue `method` with it.

        :raises NoneEtag: when the GET fails or the response lacks "_etag".
        """
        res = self.get(url, headers=headers)
        if res.status_code != 200:
            raise NoneEtag("Fail to get from url")
        try:
            etag = res.json()["_etag"]
        except KeyError:
            raise NoneEtag("None etag in response")
        return self.method_with_etag(
            url, etag, data=data, headers=headers, method=method
        )

    # The public write helpers below replace the original mutable default
    # arguments ({} for data/headers) with None, normalized on entry; the
    # observable behavior for callers is unchanged.
    def patch(
        self, url: str, etag: str = "", data: Union[List, Dict, None] = None,
        headers: Union[Dict, None] = None
    ) -> requests.Response:
        """method patch"""
        return self.method_with_etag(
            url, etag=etag, data=data if data is not None else {},
            headers=headers if headers is not None else {}, method="patch"
        )

    def patch_auto_etag(
        self, url: str, data: Union[List, Dict, None] = None,
        headers: Union[Dict, None] = None
    ) -> requests.Response:
        """method patch, auto handle etag"""
        return self.method_auto_etag(
            url, data=data if data is not None else {},
            headers=headers if headers is not None else {}, method="patch"
        )

    def delete(
        self, url: str, etag: str = "", data: Union[List, Dict, None] = None,
        headers: Union[Dict, None] = None
    ) -> requests.Response:
        """method delete"""
        return self.method_with_etag(
            url, etag=etag, data=data if data is not None else {},
            headers=headers if headers is not None else {}, method="delete"
        )

    def delete_auto_etag(
        self, url: str, data: Union[List, Dict, None] = None,
        headers: Union[Dict, None] = None
    ) -> requests.Response:
        """method delete, auto handle etag"""
        return self.method_auto_etag(
            url, data=data if data is not None else {},
            headers=headers if headers is not None else {}, method="delete"
        )
import hashlib
import datetime
from functools import wraps
import urllib.parse
from typing import Callable, Dict, Tuple
from restful_client_lite import APIClient
class WangduoyunApiClient(APIClient):
    """
    WangDuoYun api client

    Signs every POST payload with the md5(user_key + timestamp + user_secret)
    scheme required by the WangDuoYun open API.
    Notice:
        See docs at https://docs.wangduoyun.com/develop/overview/aboutus.html
    """
    def __init__(self, api_root: str, auth: Dict[str, str]) -> None:
        """
        auth : {user_key, user_secret}
        """
        super(WangduoyunApiClient, self).__init__(api_root, auth)
        # apply wrappers without decorater sugar
        # GET carries no auth here; POST gets the signed default fields.
        self.get = self.abs_url(self.get_inner)
        self.post = self.set_default_data(self.abs_url(self.post_inner))

    def get_sign(self) -> Tuple[str, int]:
        """
        get authorized sign and its timestamp

        sign = md5(user_key + str(timestamp) + user_secret), hex digest,
        where timestamp is the current Unix time in whole seconds.
        """
        md5 = hashlib.md5()
        timestamp = int(datetime.datetime.now().timestamp())
        md5.update(
            (self.auth["user_key"] + str(timestamp) + self.auth["user_secret"]).encode(
                "utf-8"
            )
        )
        sign = md5.hexdigest()
        return sign, timestamp

    def set_default_data(self, f: Callable) -> Callable:
        """Wrap `f` so user_key/timestamp/sign are merged into the data payload."""
        @wraps(f)
        def wrapper(*args, **kwargs):
            # NOTE(review): this mutates a caller-supplied data dict in place
            # (the auth fields leak back to the caller) — confirm intended.
            data = kwargs.get("data", {})
            sign, timestamp = self.get_sign()
            data.update(
                {
                    "user_key": self.auth["user_key"],
                    "timestamp": timestamp,
                    "sign": sign,
                }
            )
            kwargs["data"] = data
            return f(*args, **kwargs)

        return wrapper

    def abs_url(self, f: Callable) -> Callable:
        """Wrap `f` so its first positional argument is resolved against api_root."""
        @wraps(f)
        def wrapper(url, *args, **kwargs):
            return f(urllib.parse.urljoin(self.api_root, url), *args, **kwargs)

        return wrapper
class WangduoyunGraphqlClient(WangduoyunApiClient):
    """
    WangDuoYun GraphQL api client.

    GET requests carry the auth fields in the query string instead of the
    body (see :meth:`abs_auth_url`).

    Notice:
        See docs at https://docs.wangduoyun.com/develop/overview/aboutus.html
    """

    def __init__(self, api_root: str, auth: Dict[str, str]) -> None:
        """
        :param api_root: base URL of the API
        :param auth: ``{user_key, user_secret, source_id}`` — note that
            ``source_id`` is required here (used by :meth:`abs_auth_url`).
        """
        # NOTE: deliberately skips WangduoyunApiClient.__init__ (which would
        # assign its own wrappers); both wrappers are (re)assigned below.
        super(WangduoyunApiClient, self).__init__(api_root, auth)
        # apply wrappers without decorator sugar
        self.get = self.abs_auth_url(self.get_inner)
        self.post = self.set_default_data(self.abs_url(self.post_inner))

    # ``get_sign`` and ``set_default_data`` were byte-for-byte duplicates of
    # the parent's implementations; they are now simply inherited from
    # WangduoyunApiClient.

    def abs_auth_url(self, f: Callable) -> Callable:
        """Wrap *f* so the URL is absolutized and the auth query is appended."""

        @wraps(f)
        def wrapper(url, *args, **kwargs):
            sign, timestamp = self.get_sign()
            # BUG FIX: the template previously contained the mojibake
            # "×tamp=" — an HTML-entity decoding of "&timestamp=".
            auth_part = (
                "?user_key={user_key}&timestamp={timestamp}"
                "&sign={sign}&source_id={source_id}"
            ).format(
                user_key=self.auth["user_key"],
                source_id=self.auth["source_id"],
                sign=sign,
                timestamp=timestamp,
            )
            # BUG FIX: was ``urljoin(self.api_root, auth_part + url)``, which
            # put the query string *before* the path; the query must follow
            # the resource path.
            return f(
                urllib.parse.urljoin(self.api_root, url + auth_part),
                *args, **kwargs
            )

        return wrapper
from django.http import HttpResponse, HttpRequest
from .meta import RouteMeta
# Registered route middleware instances; executed in registration order.
MIDDLEWARE_INSTANCE_LIST = []


def register_middlewares(*middlewares):
    """Register middleware classes; they will run in the order given.

    Each class is instantiated immediately and the instance is appended to
    the global middleware list.

    :param middlewares: middleware classes (not instances)
    :return: None
    """
    MIDDLEWARE_INSTANCE_LIST.extend(cls() for cls in middlewares)
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class MiddlewareBase:
    """
    Base class for route middlewares.  Subclass and override any of the four
    hooks; all are optional (the manager checks with ``hasattr``).
    """

    def process_request(self, request: HttpRequest, meta: RouteMeta, **kwargs):
        """
        Pre-process the request object.  Typically used to decode request
        data; at this point the router has *not yet* parsed the request
        payload (B, P, G are not available).
        :param request:
        :param meta:
        :return: Return an HttpResponse to terminate the request, return
            False to stop the remaining middlewares (access denied), return
            None (or nothing) to continue with the next middleware.
        """
        pass

    def process_invoke(self, request: HttpRequest, meta: RouteMeta, **kwargs):
        """
        Adjust the handler's arguments right before it is invoked; at this
        point the router has finished parsing the request data (B, P, G are
        available) and the parsed arguments may be modified here.
        :param request:
        :param meta:
        :return: Return an HttpResponse to terminate the request, return
            False to stop the remaining middlewares (access denied), return
            None (or nothing) to continue with the next middleware.
        """
        pass

    def process_return(self, request: HttpRequest, meta: RouteMeta, **kwargs):
        """
        Post-process the handler's return value.
        :param request:
        :param meta:
        :param kwargs: always contains a 'data' item — the raw value the
            route handler returned
        :return: Return an HttpResponse to terminate processing, otherwise
            return the (possibly transformed) data
        """
        assert 'data' in kwargs
        return kwargs['data']

    def process_response(self, request: HttpRequest, meta: RouteMeta, **kwargs) -> HttpResponse:
        """
        Pre-process the response object.  Typically used to encode response
        data.
        :param request:
        :param meta:
        :param kwargs: always contains a 'response' item — the original
            HttpResponse produced for the route
        :return: Must always return an HttpResponse, whatever happens
        :rtype: HttpResponse
        """
        assert 'response' in kwargs
        return kwargs['response']
class MiddlewareManager:
    """
    Route middleware manager: runs all registered middleware hooks around a
    single request/response cycle.
    """

    def __init__(self, request: HttpRequest, meta: RouteMeta):
        # The HTTP request being processed.
        self.request = request
        # Metadata of the matched route.
        self.meta = meta

    def begin(self):
        """
        Run ``process_request`` hooks before request data is parsed.
        :return: an HttpResponse to short-circuit the request, or None
        """
        for middleware in MIDDLEWARE_INSTANCE_LIST:
            if not hasattr(middleware, 'process_request'):
                continue
            result = middleware.process_request(self.request, self.meta)
            if isinstance(result, HttpResponse):
                return result
            # False stops the remaining middlewares (access denied).
            if result is False:
                return

    def before_invoke(self):
        """
        Run ``process_invoke`` hooks just before the route handler is called.
        :return: an HttpResponse to short-circuit the request, or None
        """
        for middleware in MIDDLEWARE_INSTANCE_LIST:
            if not hasattr(middleware, 'process_invoke'):
                continue
            result = middleware.process_invoke(self.request, self.meta)
            if isinstance(result, HttpResponse):
                return result
            # False stops the remaining middlewares (access denied).
            if result is False:
                return

    def process_return(self, data):
        """
        Run ``process_return`` hooks over the handler's return value.
        :param data: the raw value returned by the route handler
        :return: the transformed data, or an HttpResponse to terminate
        """
        for middleware in MIDDLEWARE_INSTANCE_LIST:
            if not hasattr(middleware, 'process_return'):
                continue
            result = middleware.process_return(self.request, self.meta, data=data)
            # BUG FIX: was ``if result is HttpResponse`` — an identity
            # comparison against the *class*, which is never true for an
            # instance.  An HttpResponse returned by a middleware must
            # terminate processing.
            if isinstance(result, HttpResponse):
                return result
            # Feed the (possibly transformed) data to the next middleware.
            data = result
        return data

    def end(self, response):
        """
        Run ``process_response`` hooks over the outgoing response.
        :param response: the HttpResponse about to be sent
        :return: the final HttpResponse
        """
        for middleware in MIDDLEWARE_INSTANCE_LIST:
            if not hasattr(middleware, 'process_response'):
                continue
            response = middleware.process_response(self.request, self.meta, response=response)
        return response
from types import MethodType
class RouteMeta:
    """Metadata describing a single registered route.

    Bundles the handler function with the information collected from the
    route decorator (module, name and any extra keyword arguments).
    """

    def __init__(self,
                 handler: MethodType,
                 func_args,
                 route_id=None,
                 module=None,
                 name=None,
                 kwargs=None):
        """
        :param handler: the route handler function object
        :param func_args: argument list of the handler (an OrderedDict)
        :param route_id: route id, composed from the route's information
        :param module: the ``module`` value given on the decorator
        :param name: the ``name`` value given on the decorator
        :param kwargs: any other arguments given on the decorator
        """
        self._handler = handler
        self._func_args = func_args
        self._id = route_id
        self._module = module
        self._name = name
        self._kwargs = kwargs if kwargs is not None else {}

    @property
    def handler(self) -> MethodType:
        """The route handler function object."""
        return self._handler

    @property
    def func_args(self):
        """Argument list of the route handler.

        :rtype: OrderedDict
        """
        return self._func_args

    @property
    def id(self) -> str:
        """Route id, composed from the route's information."""
        return self._id

    @property
    def module(self) -> str:
        """The ``module`` value given on the decorator."""
        return self._module

    @property
    def name(self) -> str:
        """The ``name`` value given on the decorator."""
        return self._name

    @property
    def kwargs(self) -> dict:
        """Any other arguments given on the decorator.

        :rtype: Dict
        """
        return self._kwargs

    def has(self, arg_name):
        """Return whether the decorator argument *arg_name* was given."""
        return arg_name in self._kwargs

    def get(self, arg_name: str, default_value=None):
        """Return the value of decorator argument *arg_name*.

        :param arg_name: argument name to look up
        :param default_value: value returned when the argument is absent
        """
        return self._kwargs.get(arg_name, default_value)
from enum import Enum
from typing import Any, Callable, Dict, List, Optional
class ArgType(Enum):
    """Supported argument kinds for API function definitions."""
    INTEGER = 'INTEGER'
    FLOAT = 'FLOAT'
    STRING = 'STRING'
    BOOLEAN = 'BOOLEAN'
    LIST = 'LIST'
    DICT = 'DICT'


class ArgValidateResult:
    """Outcome of validating one argument: a success flag plus the coerced value."""
    __slots__ = ['is_ok', 'value']

    def __init__(self, is_ok: bool, value: Optional[Any]):
        self.is_ok = is_ok
        self.value = value


# Accepted spellings for boolean arguments (compared case-insensitively).
_TRUTHY = frozenset(['true', '1', 't', 'y', 'yes'])
_FALSY = frozenset(['false', '0', 'f', 'n', 'no'])


def validate_arg(
        value: Any,
        arg_type: ArgType) -> ArgValidateResult:
    """Coerce *value* to *arg_type*.

    Returns ``ArgValidateResult(True, coerced)`` on success and
    ``ArgValidateResult(False, None)`` when the value cannot be interpreted
    as the requested type.
    """
    try:
        text = str(value)
        if arg_type == ArgType.INTEGER:
            # Floats are truncated; everything else must parse as base-10.
            coerced = int(value) if isinstance(value, float) else int(text, 10)
        elif arg_type == ArgType.FLOAT:
            # Booleans are not acceptable floats.
            if isinstance(value, bool):
                raise ValueError
            coerced = float(value)
        elif arg_type == ArgType.STRING:
            coerced = text
        elif arg_type == ArgType.BOOLEAN:
            lowered = text.lower()
            if lowered in _TRUTHY:
                coerced = True
            elif lowered in _FALSY:
                coerced = False
            else:
                raise ValueError
        elif arg_type == ArgType.LIST:
            if not isinstance(value, list):
                raise ValueError
            coerced = value
        elif arg_type == ArgType.DICT:
            if not isinstance(value, dict):
                raise ValueError
            coerced = value
        else:
            raise NotImplementedError
        return ArgValidateResult(True, coerced)
    except Exception:
        return ArgValidateResult(False, None)


class ArgDefinition:
    """Declarative description of a single function argument."""
    __slots__ = ['name', 'type', 'is_required', 'description']

    def __init__(
            self,
            arg_name: str,
            arg_type: ArgType,
            is_required: bool,
            description: str = ''):
        self.name = arg_name
        self.type = arg_type
        self.is_required = is_required
        self.description = description

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; the enum is rendered by its name."""
        return {
            'name': self.name,
            'type': self.type.name,
            'is_required': self.is_required,
            'description': self.description,
        }
class FunctionDefinition:
    """Everything needed to expose one Python function through the API."""
    __slots__ = [
        'func',
        'arg_definitions',
        'max_concurrency',
        'description',
        'function_name',
    ]

    def __init__(
            self,
            func: Callable,
            arg_definitions: List[ArgDefinition],
            max_concurrency: int,
            description: str,
            function_name: str):
        """.
        Parameters
        ----------
        func
            Python Function
        arg_definitions
            A List of ArgDefinitions
        max_concurrency
            Max Concurrency
        description
            A Description for this Function.
        function_name
            Function Name. It is not necessary to be same with func.__name__
        """
        self.func = func
        self.arg_definitions = arg_definitions
        self.max_concurrency = max_concurrency
        self.description = description
        self.function_name = function_name

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, expanding each ArgDefinition through its own to_dict()."""
        return {
            'function_name': self.function_name,
            'arg_definitions': [arg.to_dict() for arg in self.arg_definitions],
            'max_concurrency': self.max_concurrency,
            'description': self.description,
        }
import os.path
from typing import Any, MutableMapping
import structlog
from starlette_context import context
# dictConfig-style logging configuration applied by setup_logging().
# All handlers render records as JSON through structlog's ProcessorFormatter.
logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "json": {
            "()": structlog.stdlib.ProcessorFormatter,
            "processor": structlog.processors.JSONRenderer(),
        },
    },
    "handlers": {
        # Console (stream) output.
        "json": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "json",
        },
        # Per-request log file; ./logs is created by setup_logging() before
        # this config is applied.
        "request_log": {
            "level": "DEBUG",
            "class": "logging.FileHandler",
            "filename": "./logs/request_log.log",
            "formatter": "json",
        },
        # Rotating catch-all log file.
        "all": {
            "level": "DEBUG",
            "class": "logging.handlers.RotatingFileHandler",
            "filename": "./logs/all.log",
            "maxBytes": 10485760,  # 10MB #1024*1024*10
            "backupCount": 50,
            "encoding": "utf8",
            "formatter": "json",
        },
    },
    "loggers": {
        # The "request" logger fans out to console + both files.
        "request": {
            "handlers": ["json", "all", "request_log"],
            "level": "INFO",
        },
        # "uvicorn": {"handlers": ["json"], "level": "INFO"},
        # "uvicorn.error": {"handlers": ["json"], "level": "INFO"},
        # "uvicorn.access": {
        #     "handlers": ["json"],
        #     "level": "INFO",
        #     "propagate": False,
        # },
    },
}
def setup_logging():
    """Configure structlog and stdlib logging for the application.

    Sets up structlog's processor pipeline (ISO timestamps plus
    starlette-context request data merged into every event), makes sure the
    ``./logs`` directory exists, then applies ``logging_config`` via
    ``logging.config.dictConfig``.
    """
    import logging.config

    def add_app_context(
        logger: logging.Logger,
        method_name: str,
        event_dict: MutableMapping[str, Any],
    ) -> MutableMapping[str, Any]:
        # Merge the active starlette-context data (request id, etc.) into
        # the log event, when a request context exists.
        if context.exists():
            event_dict.update(context.data)
        return event_dict

    structlog.configure(
        processors=[
            structlog.processors.TimeStamper(fmt="iso"),
            add_app_context,
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.AsyncBoundLogger,
        cache_logger_on_first_use=True,
    )

    # BUG FIX: ``if not exists: makedirs`` was racy (TOCTOU) and crashed if
    # the directory appeared between the check and the call; exist_ok=True
    # handles both cases.  The file handlers in logging_config need ./logs
    # to exist before dictConfig runs.
    os.makedirs("./logs", exist_ok=True)

    logging.config.dictConfig(logging_config)
from enum import IntEnum
from typing import Type, List, Union, Any, Dict
from pydantic import BaseModel, Field
from starlette import status
from starlette.background import BackgroundTask
from starlette.responses import JSONResponse
class StateEnum(IntEnum):
    """
    Enum representing the business-logic state carried in API responses:
    OK for success, BROKEN for any failure/error condition.
    """
    OK = 1
    BROKEN = 2
BROKEN = 2
class EmptyData(BaseModel):
    """Placeholder payload model: an empty response body with no fields."""
class SuccessResponse(BaseModel):
    """Envelope for a successful response (HTTP 200 / state OK)."""
    message: str = '请求成功'
    code: int = status.HTTP_200_OK
    state: int = StateEnum.OK.value
    # ``data`` is intentionally loose; it was previously typed as
    # Union[BaseModel, List[BaseModel]].
    data: Any = Field(default=EmptyData())
class FailResponse(BaseModel):
    """Envelope for a failed request (HTTP 400 / state BROKEN)."""
    message: str = '请求失败'
    code: int = status.HTTP_400_BAD_REQUEST
    state: int = StateEnum.BROKEN.value
    # ``data`` is intentionally loose; it was previously typed as
    # Union[BaseModel, List[BaseModel]].
    data: Any = Field(default=EmptyData())
class ErrorResponse(BaseModel):
    """Envelope for a server error (HTTP 500 / state BROKEN)."""
    message: str = '系统异常'
    code: int = status.HTTP_500_INTERNAL_SERVER_ERROR
    state: int = StateEnum.BROKEN.value
    # ``data`` is intentionally loose; it was previously typed as
    # Union[BaseModel, List[BaseModel]].
    data: Any = Field(default=EmptyData())
class ResponseContentWrapperMixin:
    """Mixin adding ``success``/``fail``/``error`` helpers that wrap payloads
    in the standard response envelope and return a starlette JSONResponse.

    Subclasses may swap the envelope models via the *_RESPONSE_CLASS
    attributes.

    NOTE(review): the shared ``EmptyData()`` default instance is reused
    across calls — safe only while it stays field-less; confirm.  The
    ``.dict()`` call is the pydantic v1 API — confirm the pinned pydantic
    version.
    """
    SUCCESS_RESPONSE_CLASS = SuccessResponse
    FAIL_RESPONSE_CLASS = FailResponse
    ERROR_RESPONSE_CLASS = ErrorResponse

    def success(self, msg: str = "请求成功", code: int = status.HTTP_200_OK, state: int = StateEnum.OK,
                data: Union[BaseModel, List[BaseModel], Dict] = EmptyData(), background: BackgroundTask = None):
        # state may be a StateEnum member; IntEnum serializes as its int value.
        return JSONResponse(content=self.SUCCESS_RESPONSE_CLASS(message=msg, code=code, state=state, data=data).dict(),
                            background=background)

    def fail(self, msg: str = "请求失败", code: int = status.HTTP_400_BAD_REQUEST, state: int = StateEnum.BROKEN,
             data: Union[BaseModel, List[BaseModel], Dict] = EmptyData(), background: BackgroundTask = None):
        return JSONResponse(content=self.FAIL_RESPONSE_CLASS(message=msg, code=code, state=state, data=data).dict(),
                            background=background)

    def error(self, msg: str = "系统异常", code: int = status.HTTP_500_INTERNAL_SERVER_ERROR, state: int = StateEnum.BROKEN,
              data: Union[BaseModel, List[BaseModel], Dict] = EmptyData(), background: BackgroundTask = None):
        return JSONResponse(content=self.ERROR_RESPONSE_CLASS(message=msg, code=code, state=state, data=data).dict(),
                            background=background)
""" Integration for Google AppEngine secure python scaffold. """
from __future__ import absolute_import, unicode_literals
# stdlib imports
from collections import namedtuple
from types import FunctionType
from typing import List, Text, Tuple, Type
# 3rd party imports
from restible import (
RestEndpoint,
RestResource,
)
# GAE bundled imports
import webapp2
# Type aliases used by the type-hint comments in this module.
HandlerClass = Type[webapp2.RequestHandler]
ResourceClass = Type[RestResource]
EndpointClass = Type[RestEndpoint]
ResourceMapping = Tuple[Text, ResourceClass]
# Per-route security configuration: anonymous / authenticated / admin handlers.
RouteConf = namedtuple('RouteConf', 'anon auth admin')
def handler(base_cls, methods=None):
    # type: (HandlerClass, List[Text]) -> FunctionType
    """ A short-cut for defining routes as functions and not classes.

    Returns the given function wrapped inside a dynamically generated
    handler class that derives from the given *base_cls*. Only the selected
    methods ('get' by default) will be implemented and will just call the
    function wrapped by this decorator.

    The wrapped function will receive an instance of the generated handler as
    it's only argument. You can use the handler passed in the ``handler``
    argument the same way you would use ``self`` inside a regular class
    based webapp2 handler.

    Args:
        base_cls (Type[app.base.BaseHandler]):
            A handler class to use as a base class for the generated wrapper
            handler.
        methods (list[str]):
            A list of HTTP methods that should be allowed on this handler.

    Returns:
        A class based handler that just calls the function wrapped by this
        decorator.

    Examples:

        >>> from app.base import handlers
        >>>
        >>> @handler(handlers.AuthenticatedAjaxHandler)
        ... def my_route(handler):
        ...     handler.response.set_status(200)
        ...     handler.render_json({"msg": "hello, world"})

    """
    methods = methods or ['get']

    def decorator(fn):  # pylint: disable=missing-docstring
        # Build a handler class on the fly, deriving from base_cls, and hang
        # the wrapped function on it as a class attribute.
        wrapper = type(fn.__name__, (base_cls,), {})
        wrapper.wrapped_view = fn

        # Only add methods that are allowed.
        for http_method in methods:
            # NOTE(review): _normalize_handler_method is a private webapp2
            # API and may change between versions — confirm it is pinned.
            method_name = webapp2._normalize_handler_method(http_method)
            setattr(
                wrapper,
                method_name,
                # NOTE(review): under Python 2 (GAE) ``self.wrapped_view`` is
                # a bound method, so ``fn`` receives the handler instance as
                # its first argument, as documented above — verify this still
                # holds if the code is ever ported to Python 3.
                lambda self, *args, **kw: self.wrapped_view(*args, **kw)
            )

        return wrapper

    return decorator
# Used only by type hint comments.
del FunctionType, List
""" restible-swagger decorators. """
from __future__ import absolute_import, unicode_literals
# stdlib imports
from types import FunctionType
from typing import Any, Dict, List
# 3rd party imports
import attr
# local imports
from . import util
@attr.s
class RouteMeta(object):
    """ A helper class to store all the metadata about a given route.

    Instances are persisted on the route handler function itself, under the
    attribute named by ``PARAM_NAME`` (see load()/save()).
    """
    responses = attr.ib(type=Dict[int, Any], default=None)
    route_params = attr.ib(type=List[Any], default=None)

    # Attribute name under which the metadata is stored on the handler.
    PARAM_NAME = '_restible_route_meta'

    def __attrs_post_init__(self):
        # Normalize the None defaults to empty containers.
        self.responses = self.responses or {}
        self.route_params = self.route_params or []

    def set_response(self, status_code, response_def):
        # type: (int, Dict[Text, Any]) -> None
        """ Set route response for the given HTTP status code. """
        self.responses[status_code] = response_def

    @classmethod
    def load(cls, fn):
        # type: (FunctionType) -> RouteMeta
        """ Load (or create) metadata for the given route.

        If the metadata is not yet saved on the handler, a new instance of
        `RouteMeta` will be created. You will need to save it manually in order
        for it to be persisted on the handler.
        """
        # Note: the RouteMeta() default is constructed eagerly on every call,
        # even when the attribute already exists on the handler.
        meta = getattr(fn, cls.PARAM_NAME, RouteMeta())
        # ``fn`` is kept outside the attrs-declared fields on purpose, so
        # save() knows which handler to write back to.
        meta.fn = fn
        return meta

    def save(self):
        # type: () -> None
        """ Save route metadata into the handler it was loaded for. """
        setattr(self.fn, self.PARAM_NAME, self)
def responses(resp_def):
    """Attach the full responses mapping to the decorated route handler."""
    def attach(fn):  # pylint: disable=missing-docstring
        meta = RouteMeta.load(fn)
        meta.responses = resp_def
        meta.save()
        return fn
    return attach
def response(status, response_def):
    """Declare a single response (keyed by HTTP status) on the handler.

    The decorator can be stacked to declare several responses for the same
    handler.
    """
    def attach(fn):  # pylint: disable=missing-docstring
        meta = RouteMeta.load(fn)
        meta.set_response(status, response_def)
        meta.save()
        return fn
    return attach
def response_200(description, array=False, schema=None):
    """Declare a standard HTTP 200 response.

    Without an explicit *schema*, the response schema defaults to the
    resource's own schema ("__self__"), or an array of it when *array* is
    truthy.
    """
    default_schema = "__self_array__" if array else "__self__"
    return response(200, {
        "description": description,
        "schema": schema or default_schema,
    })
def response_201(description, schema=None):
    """Declare a standard HTTP 201 (created) response.

    The schema defaults to the resource's own schema ("__self__").
    """
    return response(201, {
        "description": description,
        "schema": schema or "__self__",
    })
def response_204(description=None):
    """Declare a standard HTTP 204 (no content) response.

    Uses a null schema and defaults the description to "Item deleted".
    """
    return response(204, {
        "description": description or "Item deleted",
        "schema": {"type": "null"},
    })
def response_401(description=None):
    """ A standard HTTP 401 response

    A quick helper for defining 401 responses. If you're using a custom error
    schema you'll have to build those manually. Otherwise you can use this
    little helper.
    """
    # BUG FIX: this helper was based on util.RESPONSE_404 (copy-paste from
    # response_404); a 401 response should use the 401 template, which the
    # package uses elsewhere as util.RESPONSE_401.
    resp_def = dict(util.RESPONSE_401)

    if description is not None:
        resp_def['description'] = description

    return response(401, resp_def)
def response_403(description=None):
    """Declare a standard HTTP 403 response using the package error schema."""
    # NOTE(review): based on util.RESPONSE_404, same as the 404 helper — this
    # looks copy-pasted; confirm whether util defines a dedicated 403
    # template before changing it.
    spec = dict(util.RESPONSE_404)
    if description is not None:
        spec['description'] = description
    return response(403, spec)
def response_404(description=None):
    """Declare a standard HTTP 404 response using the package error schema."""
    spec = dict(util.RESPONSE_404)
    if description is not None:
        spec['description'] = description
    return response(404, spec)
def response_500(description=None):
    """ A standard HTTP 500 response

    A quick helper for defining 500 responses. If you're using a custom error
    schema you'll have to build those manually. Otherwise you can use this
    little helper.
    """
    # NOTE(review): reuses the 404 template; confirm whether util defines a
    # dedicated 500 template.
    resp_def = dict(util.RESPONSE_404)

    if description is not None:
        resp_def['description'] = description

    # BUG FIX: this helper registered the response under status 403 (a
    # copy-paste from response_403); a 500 helper must register 500.
    return response(500, resp_def)
def route_params(params_def):
    """Attach route-parameter definitions to the decorated handler.

    Lets every route document its own URL parameters so the generated spec
    can describe the API at the finest level of detail.
    """
    def attach(fn):  # pylint: disable=missing-docstring
        meta = RouteMeta.load(fn)
        meta.route_params = params_def
        meta.save()
        return fn
    return attach
# Used only in type hint comments
del FunctionType
""" Helper for defining API route mappings through config. """
from __future__ import absolute_import, unicode_literals
# stdlib imports
from typing import Any, Dict, List, Type
# 3rd party imports
import attr
from restible import RestResource
# local imports
from .endpoint import EndpointBuilder
@attr.s
class Route(object):
    """ Represents a single API route.

    A route is just a mapping of URL to resource that handles it. Each route
    will generate multiple URLs handled as each resource can handle generic and
    detail REST operation as well as all actions defined on the resource.
    """
    url = attr.ib(type=str)
    # Dotted path "module.ClassName" of the resource class; resolved lazily
    # by the res_cls property.
    resource = attr.ib(type=str)
    # NOTE(review): declared type=str, but load() stores the raw
    # route_params structure (used elsewhere as a list of parameter dicts) —
    # the attrs type metadata looks wrong; confirm.
    route_params = attr.ib(type=str)
    # NOTE(review): declared List[int], but holds action entries from the
    # route config — confirm the intended element type.
    actions = attr.ib(type=List[int])
    # Cached, lazily-imported resource class (see res_cls).
    _res_cls = attr.ib(type=type, default=None)

    @classmethod
    def load(cls, url, route_info):
        """ Load route instance from configuration item. """
        return Route(
            url=url,
            resource=route_info['resource'],
            route_params=route_info['route_params'],
            actions=route_info.get('actions', []),
        )

    @property
    def res_cls(self):
        # type: () -> Type[RestResource]
        """ Return the resource class instance associated with this route. """
        if self._res_cls is None:
            # Lazy import so routes can be declared before the resource
            # modules are importable.
            mod_name, cls_name = self.resource.rsplit('.', 1)
            mod = __import__(mod_name, fromlist=[str(cls_name)])
            self._res_cls = getattr(mod, cls_name)

        return self._res_cls

    def build_spec(self):
        # type: () -> Dict[str, Any]
        """ Extract all OpenAPI specs for paths defined by the resource.

        Builds the generic (collection) and detail path items plus one path
        item per named action (generic and detail actions separately).

        Returns:
            Dict[str, Any]:
        """
        # Imported here (not at module level), presumably to avoid a
        # circular import with restible — confirm.
        from restible import api_action

        builder = EndpointBuilder(self)

        list_path = self.res_cls.name
        detail_path = self.res_cls.name + '-detail'

        paths = {
            list_path: builder.generic_endpoint(),
            detail_path: builder.detail_endpoint(),
        }

        # Split the resource's actions into generic (collection-level) and
        # detail (item-level) ones.
        generic = []
        detail = []
        for action in self.res_cls().rest_actions():
            meta = api_action.get_meta(action)
            (generic if meta.generic else detail).append(action)

        # Group detail actions by name; each named group becomes one path.
        detail_by_name = {}
        for action in detail:
            meta = api_action.get_meta(action)
            actions = detail_by_name.setdefault(meta.name, [])
            actions.append(action)

        for name, actions in detail_by_name.items():
            endp_name = '{res}-detail-{action}'.format(
                res=self.res_cls.name,
                action=name
            )
            paths[endp_name] = builder.actions_endpoint(actions)

        # Same grouping for generic (collection-level) actions.
        generic_by_name = {}
        for action in generic:
            meta = api_action.get_meta(action)
            actions = generic_by_name.setdefault(meta.name, [])
            actions.append(action)

        for name, actions in generic_by_name.items():
            endp_name = '{res}-{action}'.format(res=self.res_cls.name, action=name)
            paths[endp_name] = builder.actions_endpoint(actions)

        return paths
# Used only in type hint comments
del Any, Dict, Type, RestResource
""" Endpoint builder. """
from __future__ import absolute_import, unicode_literals
# stdlib imports
import textwrap
# 3rd party imports
from restible import ModelResource
from six import iteritems, string_types # pylint: disable=wrong-import-order
# local imports
from . import util
from .decorators import RouteMeta
class EndpointBuilder(object):
    """ A helper class for building resource endpoints.

    Translates a restible resource (attached to a Route) into OpenAPI/Swagger
    path-item dictionaries: generic (collection) endpoints, detail endpoints
    and action endpoints.
    """

    def __init__(self, route):
        # The Route this builder generates specs for; route.res_cls is the
        # restible resource class.
        self.route = route

    def actions_endpoint(self, actions):
        """ Extract spec for all action endpoints on the resource.

        :param actions: list of action handlers (same action name).
        :return: dict mapping HTTP method -> OpenAPI operation object.
        """
        from restible import api_action

        res_cls = self.route.res_cls
        name = util.make_name(res_cls.name)

        endp = {}

        # Map each HTTP method to the action handling it.  If two actions
        # claim the same method, the one iterated last silently wins.
        by_method = {}
        for action in actions:
            meta = api_action.get_meta(action)

            for method in meta.methods:
                by_method[method] = action

        for method, action in by_method.items():
            meta = api_action.get_meta(action)
            summary, desc = _parse_docstring(action)

            endp[method] = {
                "tags": [name],
                "summary": summary,
                "description": desc,
            }

            # _get_responses never returns None in practice (it falls back
            # to {}), so the guard is effectively always taken.
            responses = self._get_responses(action, self_schema=meta.schema)
            if responses is not None:
                endp[method]['responses'] = responses

            # Actions with a schema accept a JSON body payload.
            if meta.schema:
                endp[method]['parameters'] = [{
                    "name": "payload",
                    "in": "body",
                    "schema": meta.schema,
                }]

        return endp

    def generic_endpoint(self):
        """ Extract spec for all generic endpoints on the resource.

        Covers the collection-level operations: GET (query) and POST
        (create), depending on what the resource implements.
        """
        res_cls = self.route.res_cls
        resource = res_cls()
        res_name = res_cls.name
        is_model_res = issubclass(res_cls, ModelResource)
        name = util.make_name(res_cls.name)
        endpoints = {}

        # ModelResource subclasses use *_item(s) methods; plain resources
        # use the rest_* methods.  Docstrings are pulled from whichever the
        # resource actually uses.
        res_methods = {
            'create': 'create_item' if is_model_res else 'rest_create',
            'query': 'query_items' if is_model_res else 'rest_query',
        }

        if resource.implements('query'):
            res_method = getattr(res_cls, res_methods['query'])
            query_summary, query_desc = _parse_docstring(res_method)
            responses = self._get_responses(res_method, res_cls.schema, {
                "200": {
                    "description": "A list of {}s".format(res_name),
                    "schema": "__self_array__",
                },
                "401": util.RESPONSE_401
            })

            endpoints["get"] = {
                "tags": [name],
                "summary": query_summary,
                "description": query_desc,
                "responses": responses
            }

        if resource.implements('create'):
            res_method = getattr(res_cls, res_methods['create'])
            create_summary, create_desc = _parse_docstring(res_method)
            responses = self._get_responses(res_method, res_cls.schema, {
                "201": {
                    "description": "{} successfully created".format(name),
                    "schema": '__self__',
                },
                "401": util.RESPONSE_401
            })

            endpoints["post"] = {
                "tags": [name],
                "summary": create_summary,
                "description": create_desc,
                "parameters": [
                    {
                        "name": res_name,
                        "in": "body",
                        "description": "Initial {} data".format(res_name),
                        "schema": res_cls.schema,
                    }
                ],
                "responses": responses
            }

        return endpoints

    def detail_endpoint(self):
        """ Extract spec for all detail endpoints on the resource.

        Covers the item-level operations: GET, PUT (update) and DELETE,
        depending on what the resource implements.
        """
        res_cls = self.route.res_cls
        resource = res_cls()
        res_name = res_cls.name
        is_model_res = issubclass(res_cls, ModelResource)
        name = util.make_name(res_cls.name)
        # Detail paths carry the resource's route parameters (e.g. the id).
        endpoints = {"parameters": res_cls.route_params}

        res_methods = {
            'get': 'get_item' if is_model_res else 'rest_get',
            'update': 'update_item' if is_model_res else 'rest_update',
            'delete': 'delete_item' if is_model_res else 'rest_delete',
        }

        if resource.implements('get'):
            res_method = getattr(res_cls, res_methods['get'])
            get_summary, get_desc = _parse_docstring(res_method)
            # NOTE(review): a detail GET returns a single item, but this
            # default advertises "A list of ..." with an array schema — looks
            # copy-pasted from the query endpoint; confirm intent before
            # changing the generated spec.
            responses = self._get_responses(res_method, res_cls.schema, {
                "200": {
                    "description": "A list of {}s".format(res_name),
                    "schema": '__self_array__',
                },
                "401": util.RESPONSE_401,
                "404": util.RESPONSE_404
            })

            endpoints["get"] = {
                "tags": [name],
                "summary": get_summary,
                "description": get_desc,
                "responses": responses
            }

        if resource.implements('update'):
            res_method = getattr(res_cls, res_methods['update'])
            put_summary, put_desc = _parse_docstring(res_method)
            responses = self._get_responses(res_method, res_cls.schema, {
                "200": {
                    "description": "An updated {}".format(res_name),
                    "schema": '__self__',
                },
                "401": util.RESPONSE_401,
                "404": util.RESPONSE_404
            })

            endpoints["put"] = {
                "tags": [name],
                "summary": put_summary,
                "description": put_desc,
                "parameters": [
                    {
                        "name": res_name,
                        "in": "body",
                        "description": "{} data".format(res_name),
                        "schema": res_cls.schema,
                    }
                ],
                "responses": responses
            }

        if resource.implements('delete'):
            res_method = getattr(res_cls, res_methods['delete'])
            del_summary, del_desc = _parse_docstring(res_method)
            responses = self._get_responses(res_method, res_cls.schema, {
                "200": {"description": "Successfully deleted"},
                "401": util.RESPONSE_401,
                "404": util.RESPONSE_404
            })

            endpoints["delete"] = {
                "tags": [name],
                "summary": del_summary,
                "description": del_desc,
                "responses": responses,
            }

        return endpoints

    def _get_responses(self, handler, self_schema, defaults=None):
        # Resolve the responses for a handler: explicit RouteMeta responses
        # win, then the caller-supplied defaults, then {}.  The magic schema
        # strings "__self__"/"__self_array__" are expanded to the resource's
        # own schema (or an array of it).
        route_meta = RouteMeta.load(handler)
        responses = route_meta.responses or defaults or {}

        for _, resp_spec in iteritems(responses):
            schema = resp_spec.get('schema', None)
            if isinstance(schema, string_types):
                if schema == '__self__':
                    resp_spec['schema'] = self_schema
                elif schema == '__self_array__':
                    resp_spec['schema'] = {
                        "type": "array",
                        "items": self_schema
                    }

        return responses
def _parse_docstring(obj):
if obj.__doc__ is None:
return None, None
docstring = obj.__doc__.strip()
parts = docstring.split('\n\n', 1)
summary = parts[0]
if len(parts) == 2:
desc = parts[1]
else:
desc = parts[0]
desc = textwrap.dedent(desc)
return summary, desc | /restible-swagger-0.3.3.tar.gz/restible-swagger-0.3.3/src/restible_swagger/endpoint.py | 0.78842 | 0.164785 | endpoint.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.